blob: 757b23e08f3072e203318c89f7ef1fcdc5ae72c1 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "ParserFlatbuffersFixture.hpp"
7#include "../TfLiteParser.hpp"
8
9#include <string>
10#include <iostream>
11
Sadik Armagan1625efc2021-06-10 18:24:34 +010012TEST_SUITE("TensorflowLiteParser_DepthwiseConvolution2D")
13{
// Fixture that assembles the flatbuffers-JSON for a one-operator
// DEPTHWISE_CONV_2D model (UINT8 input/output/filter; input/filter scale 1.0,
// output scale 2.0) and loads it through ParserFlatbuffersFixture.
// Every shape/data parameter is a JSON fragment, e.g. "[ 1, 3, 3, 1 ]".
// When both biasShape and biasData are non-empty, an INT32 bias tensor and
// its buffer are spliced into the model as the operator's third input.
struct DepthwiseConvolution2dFixture : public ParserFlatbuffersFixture
{
    explicit DepthwiseConvolution2dFixture(const std::string& inputShape,
                                           const std::string& outputShape,
                                           const std::string& filterShape,
                                           const std::string& filterData,
                                           const std::string& strides,
                                           const std::string& paddingType,
                                           const std::string biasShape = "",
                                           const std::string biasData = "")
    {
        // Tensor indices: 0 = input, 1 = output, 2 = filter, 3 = optional bias.
        std::string inputTensors = "[ 0, 2 ]";
        std::string biasTensor = "";
        std::string biasBuffer = "";
        if (biasShape.size() > 0 && biasData.size() > 0)
        {
            // Bias supplied: add the bias tensor/buffer and feed it to the op.
            inputTensors = "[ 0, 2, 3 ]";
            biasTensor = R"(
                        {
                            "shape": )" + biasShape + R"( ,
                            "type": "INT32",
                            "buffer": 3,
                            "name": "biasTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        } )";
            biasBuffer = R"(
                    { "data": )" + biasData + R"(, }, )";
        }
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "DEPTHWISE_CONV_2D" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": )" + inputShape + R"(,
                            "type": "UINT8",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": )" + outputShape + R"(,
                            "type": "UINT8",
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 511.0 ],
                                "scale": [ 2.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": )" + filterShape + R"(,
                            "type": "UINT8",
                            "buffer": 2,
                            "name": "filterTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        }, )" + biasTensor + R"(
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": )" + inputTensors + R"(,
                            "outputs": [ 1 ],
                            "builtin_options_type": "DepthwiseConv2DOptions",
                            "builtin_options": {
                                "padding": ")" + paddingType + R"(",
                                "stride_w": )" + strides+ R"(,
                                "stride_h": )" + strides+ R"(,
                                "depth_multiplier": 1,
                                "fused_activation_function": "NONE"
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                    { "data": )" + filterData + R"(, }, )"
                    + biasBuffer + R"(
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};
120
121struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
122{
123 DepthwiseConvolution2dSameFixture()
124 : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
125 "[ 1, 3, 3, 1 ]", // outputShape
126 "[ 1, 3, 3, 1 ]", // filterShape
127 "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
128 "1", // stride w and h
129 "SAME") // padding type
130 {}
131};
132
Sadik Armagan1625efc2021-06-10 18:24:34 +0100133TEST_CASE_FIXTURE(DepthwiseConvolution2dSameFixture, "ParseDepthwiseConv2DSame")
telsoa01c577f2c2018-08-31 09:22:23 +0100134{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000135 RunTest<4, armnn::DataType::QAsymmU8>(
telsoa01c577f2c2018-08-31 09:22:23 +0100136 0,
137 { 0, 1, 2,
138 3, 4, 5,
139 6, 7, 8 },
140 // the expected values were generated using the example python implementation at
141 // https://eli.thegreenplace.net/2018/depthwise-separable-convolutions-for-machine-learning/
142 // divide the expected values by the output scale, as it is not 1.0
143 { 14/2, 35/2, 38/2,
144 57/2, 120/2, 111/2,
145 110/2, 197/2, 158/2 });
146}
147
148struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
149{
150 DepthwiseConvolution2dValidFixture ()
151 : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
152 "[ 1, 1, 1, 1 ]", // outputShape
153 "[ 1, 3, 3, 1 ]", // filterShape
154 "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
155 "1", // stride w and h
156 "VALID") // padding type
157 {}
158};
159
Sadik Armagan1625efc2021-06-10 18:24:34 +0100160TEST_CASE_FIXTURE(DepthwiseConvolution2dValidFixture, "ParseDepthwiseConv2DValid")
telsoa01c577f2c2018-08-31 09:22:23 +0100161{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000162 RunTest<4, armnn::DataType::QAsymmU8>(
telsoa01c577f2c2018-08-31 09:22:23 +0100163 0,
164 { 0, 1, 2,
165 3, 4, 5,
166 6, 7, 8 },
167 // divide the expected values by the output scale, as it is not 1.0
168 { 120/2 });
169}
170
171struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
172{
173 DepthwiseConvolution2dSameBiasFixture()
174 : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
175 "[ 1, 3, 3, 1 ]", // outputShape
176 "[ 1, 3, 3, 1 ]", // filterShape
177 "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
178 "1", // stride w and h
179 "SAME", // padding type
180 "[ 1 ]", // biasShape
181 "[ 10, 0, 0, 0 ]") // biasData
182 {}
183};
184
Sadik Armagan1625efc2021-06-10 18:24:34 +0100185TEST_CASE_FIXTURE(DepthwiseConvolution2dSameBiasFixture, "ParseDepthwiseConv2DSameBias")
telsoa01c577f2c2018-08-31 09:22:23 +0100186{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000187 RunTest<4, armnn::DataType::QAsymmU8>(
telsoa01c577f2c2018-08-31 09:22:23 +0100188 0,
189 { 0, 1, 2,
190 3, 4, 5,
191 6, 7, 8 },
192 // divide the expected values by the output scale, as it is not 1.0
193 { ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
194 ( 57+10)/2, (120+10)/2, (111+10)/2,
195 (110+10)/2, (197+10)/2, (158+10)/2 });
196}
197
Sadik Armagand109a4d2020-07-28 10:42:13 +0100198struct DynamicDepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
199{
200 DynamicDepthwiseConvolution2dSameBiasFixture()
201 : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
202 "[ ]", // outputShape
203 "[ 1, 3, 3, 1 ]", // filterShape
204 "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
205 "1", // stride w and h
206 "SAME", // padding type
207 "[ 1 ]", // biasShape
208 "[ 10, 0, 0, 0 ]") // biasData
209 {}
210};
211
Sadik Armagan1625efc2021-06-10 18:24:34 +0100212TEST_CASE_FIXTURE(DynamicDepthwiseConvolution2dSameBiasFixture, "ParseDynamicDepthwiseConv2DSameBias")
Sadik Armagand109a4d2020-07-28 10:42:13 +0100213{
214 RunTest<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(0,
215 { { "inputTensor", { 0, 1, 2,
216 3, 4, 5,
217 6, 7, 8 } } },
218 { { "outputTensor", { ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
219 ( 57+10)/2, (120+10)/2, (111+10)/2,
220 (110+10)/2, (197+10)/2, (158+10)/2 } } },
221 true);
222}
223
// Variant of DepthwiseConvolution2dFixture using INT8 tensors and an
// adjustable filter quantization block, so that per-channel (per-axis)
// quantization of the filter can be exercised. The filter_quant_* and
// output_scale parameters are JSON fragments; supplying a non-empty
// filter_quant_axis adds a "quantized_dimension" entry, which switches the
// filter to per-channel quantization.
struct DepthwiseConvolution2dFixture2 : public ParserFlatbuffersFixture
{
    explicit DepthwiseConvolution2dFixture2(const std::string& inputShape,
                                            const std::string& outputShape,
                                            const std::string& filterShape,
                                            const std::string& filterData,
                                            const std::string& strides,
                                            const std::string& paddingType,
                                            const std::string biasShape = "",
                                            const std::string biasData = "",
                                            const std::string filter_quant_min = "[ 0.0 ]",
                                            const std::string filter_quant_max = "[ 255.0 ]",
                                            const std::string filter_quant_scale = "[ 1.0 ]",
                                            const std::string filter_quant_zero_point = "[ 0 ]",
                                            const std::string filter_quant_axis = "",
                                            const std::string output_scale = "[ 1.0 ]")
    {
        // Tensor indices: 0 = input, 1 = output, 2 = filter, 3 = optional bias.
        std::string inputTensors = "[ 0, 2 ]";
        std::string biasTensor = "";
        std::string biasBuffer = "";
        if (biasShape.size() > 0 && biasData.size() > 0)
        {
            // Bias supplied: add the bias tensor/buffer and feed it to the op.
            inputTensors = "[ 0, 2, 3 ]";
            biasTensor = R"(
                        {
                            "shape": )" + biasShape + R"( ,
                            "type": "INT32",
                            "buffer": 3,
                            "name": "biasTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        } )";
            biasBuffer = R"(
                    { "data": )" + biasData + R"(, }, )";
        }

        // Assemble the filter's quantization block from the caller's fragments.
        // (sic: local name 'filter_qantization' is a long-standing typo.)
        std::string filter_qantization =
                       R"(
                                "min": )" + filter_quant_min + R"(,
                                "max": )" + filter_quant_max + R"(,
                                "scale": )" + filter_quant_scale + R"(,
                                "zero_point": )" + filter_quant_zero_point;
        // A given quantization axis indicates if per channel quantization is used for filters
        if (filter_quant_axis.size() > 0)
        {
            filter_qantization +=
                       R"(,
                                "quantized_dimension": )" + filter_quant_axis;
        }
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "DEPTHWISE_CONV_2D" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": )" + inputShape + R"(,
                            "type": "INT8",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": )" + outputShape + R"(,
                            "type": "INT8",
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 511.0 ],
                                "scale": )" + output_scale + R"(,
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": )" + filterShape + R"(,
                            "type": "INT8",
                            "buffer": 2,
                            "name": "filterTensor",
                            "quantization": {)" + filter_qantization + R"(
                            }
                        }, )" + biasTensor + R"(
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": )" + inputTensors + R"(,
                            "outputs": [ 1 ],
                            "builtin_options_type": "DepthwiseConv2DOptions",
                            "builtin_options": {
                                "padding": ")" + paddingType + R"(",
                                "stride_w": )" + strides+ R"(,
                                "stride_h": )" + strides+ R"(,
                                "depth_multiplier": 1,
                                "fused_activation_function": "NONE"
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                    { "data": )" + filterData + R"(, }, )"
                    + biasBuffer + R"(
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};
346
347
// No quantization meaning scale=1.0 and offset=0.0 and tensor quantization
// (per-tensor defaults of Fixture2 are used: filter scale [ 1.0 ], zero-point [ 0 ]).
struct DepthwiseConvolution2dNoQuantFixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dNoQuantFixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     "[ 9,8,7, 6,5,4, 3,2,1, "
                                     "9,8,7, 6,5,4, 3,2,1, "
                                     "9,8,7, 6,5,4, 3,2,1 ]",    // filterData
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     ""                          // bias data
                                     )
    {}
};
365
// No quantization meaning scale=1.0 and offset=0.0 and tensor quantization
TEST_CASE_FIXTURE(DepthwiseConvolution2dNoQuantFixture, "ParseDepthwiseConv2DNoQuant")
{
    // All-ones 1x3x3x3 input; output scale is 1.0 so the expected values are
    // the raw convolution sums of the filter taps under SAME padding.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
          36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
}
375
// Uses per channel quantization on weights but with scales = 1.0 and offsets = 0.0,
// so results should match the per-tensor NoQuant fixture above.
struct DepthwiseConvolution2dNoChannelQuantFixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dNoChannelQuantFixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     "[ 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1 ]", //filterData
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 1.0, 1.0, 1.0]",         // filter quantization scales
                                     "[ 0, 0, 0]",               // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
397
// Uses per channel quantization on weights but with scales = 1.0 and offsets = 0.0
TEST_CASE_FIXTURE(DepthwiseConvolution2dNoChannelQuantFixture, "ParseDepthwiseConv2DFilterNoChannelQuant")
{
    // Unit scales mean the expected output equals the plain convolution sums
    // (identical to ParseDepthwiseConv2DNoQuant).
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
          36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
}
407
// Uses per channel quantization on weights but all scales are set to the same value (0.25);
// the stored weights are the real weights divided by that scale (e.g. 9 -> 36).
struct DepthwiseConvolution2dWeightsPerChannelQuantFixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuantFixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     // filterData is [ 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1 ]
                                     // quantized per channel with q_dim=3
                                     "[36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, "
                                     "20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12, 8, 4]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.25, 0.25]",      // filter quantization scales
                                     "[ 0, 0, 0]",               // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
432
// Weights are per channel quantized but all scales are set to the same value
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuantFixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant")
{
    // After dequantizing the weights (x 0.25) the effective filter matches the
    // NoQuant fixtures, so the expected output is the same convolution sums.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
          36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
}
443
// Uses per channel quantization on weights all scales are different in this test
// (0.25 / 0.2 / 0.1 for the three channels; stored weights are pre-divided accordingly).
struct DepthwiseConvolution2dWeightsPerChannelQuant1Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant1Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     // filterData is [ 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1 ]
                                     // quantized per channel with q_dim=3
                                     "[36, 40, 70, 24, 25, 40, 12, 10, 10, 36, 40, 70, 24, "
                                     "25, 40, 12, 10, 10, 36, 40, 70, 24, 25, 40, 12, 10, 10]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1]",        // filter quantization scales
                                     "[ 0, 0, 0]",               // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
468
// Uses per channel quantization on weights all scales are different in this test
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1")
{
    // Dequantized weights equal the NoQuant filter, so the expected output is
    // the same convolution sums as in the fixtures above.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
          36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
}
479
480
// Uses per channel quantization on weights all scales are different in this test
// Uses different shape for weights and input compared to the other tests above
struct DepthwiseConvolution2dWeightsPerChannelQuant2Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant2Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 4 ]",           // outputShape
                                     "[ 1, 2, 2, 4 ]",           // filterShape
                                     // filterData is [ 9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3 ]
                                     // quantized per channel with q_dim=3
                                     "[36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1, 0.3]",   // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
505
// Uses per channel quantization on weights all scales are different in this test
// Uses different shape for weights and input compared to the other tests above
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant2Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant2")
{
    // All-ones 1x4x4x4 input against the 2x2x4 per-channel-quantized filter;
    // SAME padding gives the smaller edge/corner sums in the last row/column.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1,
          1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1,
          1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1,
          1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1},
        { 21, 26, 22, 18, 21, 26, 22, 18, 21, 26, 22, 18, 10, 17, 15, 13,
          21, 26, 22, 18, 21, 26, 22, 18, 21, 26, 22, 18, 10, 17, 15, 13,
          21, 26, 22, 18, 21, 26, 22, 18, 21, 26, 22, 18, 10, 17, 15, 13,
          14, 12, 10,  8, 14, 12, 10,  8, 14, 12, 10,  8,  9,  8,  7,  6});
}
522
// Test for depthwise_multiplier different to one (M > 1):
// 4 input channels, 16 filter/output channels -> multiplier of 4.
struct DepthwiseConvolution2dWeightsPerChannelQuant4Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 16 ]",          // outputShape
                                     "[ 1, 2, 2, 16 ]",          // filterShape
                                     // filter data is [ 9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3,
                                     //                  9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3,
                                     //                  9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3,
                                     //                  9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3 ]
                                     // quantized per channel with q_dim=3
                                     "[36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10, "
                                     "36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10, "
                                     "36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10, "
                                     "36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3]",     // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
555
// Test for depthwise_multiplier different to one (M > 1)
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4")
{
    // All-ones input; each output row holds 16 channels. Rows/columns at the
    // image edge see fewer filter taps under SAME padding, hence the smaller
    // values in the last row and column blocks.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1,
          1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1,
          1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1,
          1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1},
        { 36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
           9,  8,  7,  6,  5,  4,  3,  2,  1,  9,  8,  7,  6,  5,  4,  3});
}
583
Jan Eilers7612bd62021-04-06 17:29:03 +0100584
// Depth multiplier > 1 with non-uniform filter data and per-channel scales
// (including a non-terminating scale of 0.333333333).
struct DepthwiseConvolution2dWeightsPerChannelQuant6Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant6Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 16 ]",          // outputShape
                                     "[ 1, 2, 2, 16 ]",          // filterShape
                                     // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
                                     //                  2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
                                     //                  3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
                                     //                  1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0]
                                     // quantized per channel with q_dim=3
                                     "[12,20,10, 3, 4,15,30, 6, 4,20,30,12, 4,10,20,12,"
                                     " 8, 0,30, 3, 0,10,40, 9,16,15, 0, 3,12,20,40, 3,"
                                     " 12,15,20, 0, 0, 0,10, 9,12,10,40,12,12, 5,10, 9,"
                                     " 4, 0, 0, 6,12, 0,10, 3,16,10,20, 3, 8,15,20, 0]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1, 0.333333333,"
                                     "0.25, 0.2, 0.1, 0.333333333,"
                                     "0.25, 0.2, 0.1, 0.333333333,"
                                     "0.25, 0.2, 0.1, 0.333333333]", // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
616
617
// Depth multiplier > 1, varied input data and per-channel filter scales.
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant6Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant6")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1,0,1,2,0,4,4,0,2,1,2,0,1,3,3,0,
          1,2,2,3,3,4,1,1,2,4,1,3,4,2,0,2,
          0,3,1,3,4,3,2,0,1,2,3,3,0,2,4,2,
          1,2,1,4,3,4,1,3,1,0,2,3,1,3,2,0},
        {  9, 7, 3, 7,12, 8,22,22,27,22,13,17,13,10, 9,17,
          15, 9,12, 6,16,14,24,27,19,26,18,23, 9,10, 7, 3,
          18,14, 9,11, 7, 9,21,25,17,19,10,15,13, 9, 7, 9,
          15,16, 9, 1, 3, 9,11,12, 3,12, 9,12, 6, 2, 2, 6,
          13, 4,10,12,11,14,28,28,17,17,14,15,15,13,13,22,
          26,24,17, 7,10,20,33,31,23,17,17,16,16,23,20, 7,
          17,11,16, 6,10,16,24,22,26,18,23,20,22,23,21,23,
          12,16, 4, 4, 2, 6, 8,10,12, 8,16,16, 8, 6, 6,14,
          14, 3,14,10,15,15,27,25,16,14, 9,11,21,19,16,24,
          24,25,13, 7, 3,13,21,24,25,23,14,17,24,24,21,12,
           7, 7, 3, 3,11,10,17,13,33,32,21,26,18,17,17,23,
           3, 3, 2, 0, 2, 6, 9,13,10,20,20,24, 2, 4, 4, 8,
           9, 4,10, 4, 2,14,22,16, 5, 7, 3, 5,13,20,20,19,
          11,12, 6, 4, 4,12,12, 8, 9,10, 3, 6,12,18,18,15,
           5, 4, 4, 2, 0, 6,12, 9,10,14, 6,10, 3, 6, 6,12,
           3, 4, 1, 1, 3, 9, 9, 6, 2, 8, 6, 8, 0, 0, 0, 0});
}
644
645
// Per-channel quantization with three different scales and a non-symmetric
// 3x3x3 filter (depth multiplier 1).
struct DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     // filterData is [ 1,4,0,2,4,3,1,0,1,
                                     //                 3,0,4,0,1,3,4,2,4,
                                     //                 3,0,3,4,4,0,3,4,2]
                                     // quantized per channel with q_dim=3
                                     "[ 4,20, 0, 8,20,30, 4, 0,10,12,"
                                     " 0,40, 0, 5,30,16,10,40,12, 0,"
                                     "30,16,20, 0,12,20,20]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1]",        // filter quantization scales
                                     "[ 0, 0, 0]",               // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
672
673
// Per-channel quantization, all-ones input, non-symmetric filter.
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_1")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        { 11,11, 9,17,11,16,10, 5,10,
          14,15,13,21,19,20,13,13,13,
           7, 7,11,11,11,15, 6, 9,10});
}
684
// Same with input different to 1 (identical filter/quantization to the
// Quant1_1 fixture; only the test's input data differs).
struct DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     // filterData is [ 1,4,0,2,4,3,1,0,1,
                                     //                 3,0,4,0,1,3,4,2,4,
                                     //                 3,0,3,4,4,0,3,4,2]
                                     // quantized per channel with q_dim=3
                                     "[ 4,20, 0, 8,20,30, 4, 0,10,12,"
                                     " 0,40, 0, 5,30,16,10,40,12, 0,"
                                     "30,16,20, 0,12,20,20]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1]",        // filter quantization scales
                                     "[ 0, 0, 0]",               // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
712
713
// Per-channel quantization with a varied (non-constant) input tensor.
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_2")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 3,2,0,0,4,3,0,1,2,
          0,1,3,0,4,2,2,2,3,
          2,4,3,2,0,4,3,4,0},
        {  0,30,16,15,30,32, 8, 9,24,
          20,33,28,34,48,50,18,38,35,
           8, 8,36,20,28,33,10,28,25});
}
726
727
// Depth multiplier > 1 (4 input channels -> 16 output channels) combined with
// per-channel quantization using four distinct scales repeated across channels.
struct DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 16 ]",          // outputShape
                                     "[ 1, 2, 2, 16 ]",          // filterShape
                                     // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
                                     //                  2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
                                     //                  3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
                                     //                  1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0 ]
                                     // quantized per channel with q_dim=3
                                     "[12,20,10, 3, 4,15,30, 6, 4,20,30,13, 4,10,20,13,"
                                     " 8, 0,30, 3, 0,10,40,10,16,15, 0, 3,12,20,40, 3,"
                                     " 12,15,20, 0, 0, 0,10,10,12,10,40,13,12, 5,10,10,"
                                     " 4, 0, 0, 6,12, 0,10, 3,16,10,20, 3, 8,15,20, 0]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3]",     // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
759
760
// Depth multiplier > 1 with per-channel quantization; all-ones input.
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_1")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1,
          1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1,
          1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1,
          1,1,1,1,  1,1,1,1,  1,1,1,1,  1,1,1,1},
        {  9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           6, 7, 3, 1, 1, 3, 4, 5, 4, 6, 7, 8, 4, 3, 3, 7,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           6, 7, 3, 1, 1, 3, 4, 5, 4, 6, 7, 8, 4, 3, 3, 7,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           6, 7, 3, 1, 1, 3, 4, 5, 4, 6, 7, 8, 4, 3, 3, 7,
           5, 4, 4, 2, 1, 5, 7, 5, 5, 7, 3, 5, 4, 6, 6, 5,
           5, 4, 4, 2, 1, 5, 7, 5, 5, 7, 3, 5, 4, 6, 6, 5,
           5, 4, 4, 2, 1, 5, 7, 5, 5, 7, 3, 5, 4, 6, 6, 5,
           3, 4, 1, 1, 1, 3, 3, 2, 1, 4, 3, 4, 1, 2, 2, 4});
}
787
788
789
// Same model as the Quant4_1 fixture (depth multiplier > 1, per-channel
// quantization); only the test's input data differs.
struct DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 16 ]",          // outputShape
                                     "[ 1, 2, 2, 16 ]",          // filterShape
                                     // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
                                     //                  2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
                                     //                  3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
                                     //                  1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0 ]
                                     // quantized per channel with q_dim=3
                                     "[12,20,10, 3, 4,15,30, 6, 4,20,30,13, 4,10,20,13,"
                                     " 8, 0,30, 3, 0,10,40,10,16,15, 0, 3,12,20,40, 3,"
                                     " 12,15,20, 0, 0, 0,10,10,12,10,40,13,12, 5,10,10,"
                                     " 4, 0, 0, 6,12, 0,10, 3,16,10,20, 3, 8,15,20, 0]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3]",     // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
821
822
// Depth multiplier > 1, per-channel quantization, varied input data.
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_2")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 3,3,3,4, 4,4,0,0, 0,3,4,3, 0,2,2,3,
          3,0,3,0, 0,3,2,1, 4,1,2,2, 0,0,0,4,
          3,2,2,2, 2,1,0,4, 4,3,2,4, 3,2,0,0,
          4,1,4,4, 1,0,4,3, 3,2,0,3, 1,1,0,2},
        { 26,21,21, 7,12,17,28,21,20,22,25,26, 6,11,10,16,
          16,16, 4,12, 7,18,28,27,30,20,12,14,16,19,17, 6,
          12,12, 8, 0, 3,13,18,15,18,26,20,26,26,32,28,21,
           0, 0, 0, 0, 2, 6, 6, 4, 2, 8, 6, 8,15,10,10,24,
          20,21, 9, 7, 3, 6,15,16,17,22,17,22,17,18,14, 7,
          18, 6,16,12,12,11,17,15,18,18,10,12,27,26,22,18,
          27,28,12,10, 7, 3, 8,13, 8,12,14,16,26,24,24,24,
           9, 9, 6, 0, 0, 0, 2, 6, 0, 0, 0, 0, 4, 8, 8,16,
          26,24,17, 7, 2, 8,11,10,30,24,30,28,32,33,30,24,
          20,11,16,12, 7, 9,17,13,20,14,16,18,31,36,33,29,
          28,25,19, 9, 6,13,20,19, 2, 8, 6, 8,17,17,15,25,
          12,15, 5, 3, 2, 6, 7, 7, 0, 0, 0, 0, 6, 2, 2, 6,
          14,16, 7, 5, 1, 3, 3, 2,20,28,12,20,13,20,20,19,
           9, 4,10, 4, 0, 4, 8, 6, 4,16,12,16,12,18,18,15,
          11,12, 6, 4, 2, 8,10, 7, 0, 0, 0, 0, 9,14,14,14,
           3, 4, 1, 1, 1, 3, 3, 2, 0, 0, 0, 0, 2, 4, 4, 8});
}
849
850
// Depth multiplier > 1 with 16 distinct per-channel scales (1..16) and a
// non-default output scale of 100.0, so the dequantized results are rescaled.
struct DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 16 ]",          // outputShape
                                     "[ 1, 2, 2, 16 ]",          // filterShape
                                     // filter data is [ 1, 4, 9, 16, 25, 36,
                                     //                  49, 64, 81, 100, 121, 144,
                                     //                  169, 196, 225, 256, 17, 36,
                                     //                  57, 80, 105, 132, 161, 192,
                                     //                  225, 260, 297, 336, 377, 420,
                                     //                  465, 512, 33, 68, 105, 144,
                                     //                  185, 228, 273, 320, 369, 420,
                                     //                  473, 528, 585, 644, 705, 768,
                                     //                  49, 100, 153, 208, 265, 324,
                                     //                  385, 448, 513, 580, 649, 720,
                                     //                  793, 868, 945,1024 ]
                                     // quantized per channel with q_dim=3
                                     "[ 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,"
                                     " 17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,"
                                     " 33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,"
                                     "49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16]", // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3",                        // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     "[ 100.0 ]"                 // output scale
                                     )
    {}
};
887
// Test for depthwise_multiplier different to one (M > 1)
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_5")
{
    // Runs subgraph 0 with a QAsymmS8 input tensor; expected outputs were
    // precomputed for the fixture's per-channel scales (1..16) and its
    // output scale of 100.0.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        // Input tensor values (64 values; groups of 4 per spatial position).
        { 1,1,1,2,2,2,1,2,1,2,2,1,2,2,1,1,1,1,1,1,1,2,2,2,
          1,2,2,2,1,1,1,2,1,1,1,1,2,1,2,1,2,1,1,2,1,2,1,1,
          1,2,2,1,2,2,1,1,2,1,2,1,1,2,1,2},
        // Expected output tensor values (256 values; 16 per spatial position).
        { 1, 2, 3, 5, 9,11,14,16,17,19,21,24,32,36,39,43,
          1, 2, 3, 4,11,14,17,20,22,26,29,33,34,38,42,46,
          1, 2, 3, 5, 8,11,13,16,16,18,21,24,33,36,39,43,
          0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6,13,14,16,17,
          1, 3, 4, 6, 6, 8,10,12,19,22,24,27,23,25,28,30,
          1, 3, 5, 8, 7, 8,10,12,18,21,24,27,32,36,39,43,
          1, 2, 4, 5, 8,10,13,15,12,14,16,18,30,33,37,40,
          0, 0, 1, 1, 3, 4, 5, 7, 4, 5, 5, 6, 9,10,11,12,
          1, 3, 5, 7,10,12,15,17,17,20,23,25,19,21,23,25,
          2, 4, 6, 8, 7, 9,11,13,17,20,23,25,23,25,28,30,
          1, 2, 4, 6, 9,11,14,16,15,17,20,22,28,31,35,38,
          0, 0, 1, 1, 4, 5, 6, 7, 4, 5, 5, 6,13,14,16,17,
          0, 0, 1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 5, 6, 6, 7,
          0, 0, 1, 1, 1, 2, 2, 3, 5, 6, 7, 8, 5, 6, 6, 7,
          0, 0, 0, 1, 2, 3, 3, 4, 3, 4, 5, 6, 9,10,11,12,
          0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 3, 3, 4, 5});
}
914
915
// Fixture with a [1,2,2,16] filter over a 4-channel [1,4,4,4] input (depth
// multiplier of 4). Filter values are stored pre-quantized; the fractional
// per-channel scales below (quantized axis = 3) dequantize them back to the
// real weights listed in the comment. Exercises the same weights as the
// Quant4_2 fixture but expressed via different scale/stored-value pairs.
struct DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture()
        : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",   // inputShape
                                         "[ 1, 4, 4, 16 ]",  // outputShape
                                         "[ 1, 2, 2, 16 ]",  // filterShape
                                         // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
                                         //                  2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
                                         //                  3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
                                         //                  1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0 ]
                                         // quantized per channel with q_dim=3
                                         "[12,20,10, 3, 2,24, 9,10, 5,16,30,12, 3,10, 4,32,"
                                         " 8, 0,30, 3, 0,16,12,15,20,12, 0, 3, 9,20, 8, 8,"
                                         " 12,15,20, 0, 0, 0, 3,15,15, 8,40,12, 9, 5, 2,24,"
                                         " 4, 0, 0, 6, 6, 0, 3, 5,20, 8,20, 3, 6,15, 4, 0]",
                                         "1",                // stride w and h
                                         "SAME",             // padding type
                                         "",                 // bias shape (empty: no bias)
                                         "",                 // bias data
                                         "[ 0.0 ]",          // filter quantization min values
                                         "[ 255.0 ]",        // filter quantization max values
                                         "[0.25, 0.2, 0.1, 0.3333333333, "
                                         "0.5, 0.125, 0.33333333, 0.2, "
                                         "0.2, 0.25, 0.1, 0.333333333, "
                                         "0.3333333333, 0.2, 0.5, 0.125]", // filter quantization scales
                                         "[ 0, 0, 0, 0]",    // filter quantization zero-points
                                                             // NOTE(review): 4 zero-points vs 16 scales —
                                                             // confirm the parser tolerates the mismatch
                                         "3"                 // filter quantized axis
                                                             // (in case of per channel quantization)
                                         )
    {}
};
947
948// Test for depthwise_multiplier different to one (M > 1)
Sadik Armagan1625efc2021-06-10 18:24:34 +0100949TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture,
950 "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_3_1")
Jan Eilers7612bd62021-04-06 17:29:03 +0100951{
952 RunTest<4, armnn::DataType::QAsymmS8>(
953 0,
954 { 3,3,3,4, 4,4,0,0, 0,3,4,3, 0,2,2,3,
955 3,0,3,0, 0,3,2,1, 4,1,2,2, 0,0,0,4,
956 3,2,2,2, 2,1,0,4, 4,3,2,4, 3,2,0,0,
957 4,1,4,4, 1,0,4,3, 3,2,0,3, 1,1,0,2},
958 { 26,21,21, 7,12,17,28,21,20,22,25,26, 6,11,10,16,
959 16,16, 4,12, 7,18,28,27,30,20,12,14,16,19,17, 6,
960 12,12, 8, 0, 3,13,18,15,18,26,20,26,26,32,28,21,
961 0, 0, 0, 0, 2, 6, 6, 4, 2, 8, 6, 8,15,10,10,24,
962 20,21, 9, 7, 3, 6,15,16,17,22,17,22,17,18,14, 7,
963 18, 6,16,12,12,11,17,15,18,18,10,12,27,26,22,18,
964 27,28,12,10, 7, 3, 8,13, 8,12,14,16,26,24,24,24,
965 9, 9, 6, 0, 0, 0, 2, 6, 0, 0, 0, 0, 4, 8, 8,16,
966 26,24,17, 7, 2, 8,11,10,30,24,30,28,32,33,30,24,
967 20,11,16,12, 7, 9,17,13,20,14,16,18,31,36,33,29,
968 28,25,19, 9, 6,13,20,19, 2, 8, 6, 8,17,17,15,25,
969 12,15, 5, 3, 2, 6, 7, 7, 0, 0, 0, 0, 6, 2, 2, 6,
970 14,16, 7, 5, 1, 3, 3, 2,20,28,12,20,13,20,20,19,
971 9, 4,10, 4, 0, 4, 8, 6, 4,16,12,16,12,18,18,15,
972 11,12, 6, 4, 2, 8,10, 7, 0, 0, 0, 0, 9,14,14,14,
973 3, 4, 1, 1, 1, 3, 3, 2, 0, 0, 0, 0, 2, 4, 4, 8});
974}
975
Sadik Armagan1625efc2021-06-10 18:24:34 +0100976}