//
// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <tensorflow/lite/version.h>

namespace
{

template <typename T, typename B = float>
std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
                                          tflite::TensorType tensorType,
                                          uint32_t strideX,
                                          uint32_t strideY,
                                          uint32_t dilationX,
                                          uint32_t dilationY,
                                          tflite::Padding padding,
                                          tflite::ActivationFunctionType fused_activation_function,
                                          const std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& filterTensorShape,
                                          const std::vector<int32_t>& biasTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const std::vector<T>& filterData,
                                          const std::vector<B>& biasData,
                                          const std::vector<float> biasScales = {1.0f},
                                          const std::vector<int64_t> biasOffsets = {0},
                                          const std::vector<float> filterScales = {1.0f},
                                          const std::vector<int64_t> filterOffsets = {0},
                                          float outputQuantScale = 2.0f,
                                          int outputQuantOffset = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0,
                                          int32_t depth_multiplier = 1,
                                          int32_t filterQuantizationDim = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder);
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));

    buffers[3] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                             sizeof(B) * biasData.size()));
    buffers[4] = CreateBuffer(flatBufferBuilder);

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));

    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(filterScales),
                                     flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
                                     tflite::QuantizationDetails_NONE,
                                     0,
                                     filterQuantizationDim);

    auto biasQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(biasScales),
                                     flatBufferBuilder.CreateVector<int64_t>(biasOffsets));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);

    auto biasTensorType = ::tflite::TensorType_FLOAT32;
    if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
    {
        biasTensorType = ::tflite::TensorType_INT32;
    }
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
                              biasTensorType,
                              3,
                              flatBufferBuilder.CreateString("bias"),
                              biasQuantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              4,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    flatbuffers::Offset<void> operatorBuiltinOptions;
    tflite::BuiltinOptions operatorBuiltinOptionsType;

    if (convolutionOperatorCode == tflite::BuiltinOperator_DEPTHWISE_CONV_2D)
    {
        operatorBuiltinOptionsType = tflite::BuiltinOptions_DepthwiseConv2DOptions;
        operatorBuiltinOptions = CreateDepthwiseConv2DOptions(flatBufferBuilder,
                                                              padding,
                                                              strideX,
                                                              strideY,
                                                              depth_multiplier,
                                                              fused_activation_function,
                                                              dilationX,
                                                              dilationY).Union();
    }
    else if (convolutionOperatorCode == tflite::BuiltinOperator_CONV_2D)
    {
        operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv2DOptions;
        operatorBuiltinOptions = CreateConv2DOptions(flatBufferBuilder,
                                                     padding,
                                                     strideX,
                                                     strideY,
                                                     fused_activation_function,
                                                     dilationX,
                                                     dilationY).Union();
    }

    // Create operator
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> convolutionOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Convolution2d Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, convolutionOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
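
// A minimal sketch of how the builder above can express per-channel (per-axis) filter
// quantization: pass one scale per output channel in filterScales and leave
// filterQuantizationDim at its default of 0, the output-channel axis of a CONV_2D
// filter laid out as [Cout, H, W, Cin]. All shapes and scale values here are
// illustrative only, not taken from any existing test. Per the TFLite quantization
// spec, each biasScale is assumed to be inputScale * filterScale for that channel.
//
//     std::vector<int8_t> filterData { 2, 4 };                // one value per output channel
//     std::vector<char> model = CreateConv2dTfLiteModel<int8_t, int32_t>(
//         tflite::BuiltinOperator_CONV_2D,
//         tflite::TensorType_INT8,
//         1, 1,                          // strideX, strideY
//         1, 1,                          // dilationX, dilationY
//         tflite::Padding_SAME,
//         tflite::ActivationFunctionType_NONE,
//         { 1, 3, 3, 1 },                // input  [N, H, W, Cin]
//         { 2, 1, 1, 1 },                // filter [Cout, H, W, Cin]
//         { 2 },                         // bias shape
//         { 1, 3, 3, 2 },                // output
//         filterData,
//         { 0, 0 },                      // biasData
//         { 0.25f, 0.5f },               // biasScales = inputScale (1.0f) * filterScales
//         { 0, 0 },                      // biasOffsets
//         { 0.25f, 0.5f },               // one filterScale per output channel
//         { 0, 0 },                      // filterOffsets (zero for per-channel INT8)
//         2.0f, 0,                       // output quantization
//         1.0f, 0);                      // input quantization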

template <typename T, typename B = float>
void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode,
                     tflite::TensorType tensorType,
                     uint32_t strideX,
                     uint32_t strideY,
                     uint32_t dilationX,
                     uint32_t dilationY,
                     tflite::Padding padding,
                     tflite::ActivationFunctionType fused_activation_function,
                     std::vector<int32_t>& inputShape,
                     std::vector<int32_t>& filterShape,
                     std::vector<int32_t>& outputShape,
                     std::vector<T>& inputValues,
                     std::vector<T>& filterValues,
                     std::vector<T>& expectedOutputValues,
                     const std::vector<int32_t>& biasShape = {},
                     const std::vector<B>& biasValues = {},
                     const std::vector<float> biasScales = {1.0f},
                     const std::vector<int64_t> biasOffsets = {0},
                     const std::vector<float> filterScales = {1.0f},
                     const std::vector<int64_t> filterOffsets = {0},
                     float outputQuantScale = 2.0f,
                     int outputQuantOffset = 0,
                     float quantScale = 1.0f,
                     int quantOffset = 0,
                     int32_t depth_multiplier = 1,
                     int32_t filterQuantizationDim = 3,
                     const std::vector<armnn::BackendId>& backends = {})
{
    using namespace delegateTestInterpreter;

    std::vector<char> modelBuffer;
    modelBuffer = CreateConv2dTfLiteModel(convolutionOperatorCode,
                                          tensorType,
                                          strideX,
                                          strideY,
                                          dilationX,
                                          dilationY,
                                          padding,
                                          fused_activation_function,
                                          inputShape,
                                          filterShape,
                                          biasShape,
                                          outputShape,
                                          filterValues,
                                          biasValues,
                                          biasScales,
                                          biasOffsets,
                                          filterScales,
                                          filterOffsets,
                                          outputQuantScale,
                                          outputQuantOffset,
                                          quantScale,
                                          quantOffset,
                                          depth_multiplier,
                                          filterQuantizationDim);

    // Set up an interpreter using just the TFLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
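
// A minimal usage sketch for ConvolutionTest (values are illustrative, not taken from a
// real test case): a 1x1 float32 convolution with a bias of 1, so every output element
// should equal input * 2 + 1.
//
//     std::vector<int32_t> inputShape  { 1, 2, 2, 1 };
//     std::vector<int32_t> filterShape { 1, 1, 1, 1 };
//     std::vector<int32_t> outputShape { 1, 2, 2, 1 };
//     std::vector<float>   inputValues          { 1.f, 2.f, 3.f, 4.f };
//     std::vector<float>   filterValues         { 2.f };
//     std::vector<float>   expectedOutputValues { 3.f, 5.f, 7.f, 9.f };
//     ConvolutionTest<float>(tflite::BuiltinOperator_CONV_2D,
//                            tflite::TensorType_FLOAT32,
//                            1, 1,                 // strideX, strideY
//                            1, 1,                 // dilationX, dilationY
//                            tflite::Padding_SAME,
//                            tflite::ActivationFunctionType_NONE,
//                            inputShape, filterShape, outputShape,
//                            inputValues, filterValues, expectedOutputValues,
//                            { 1 },                // biasShape
//                            { 1.f });             // biasValues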

// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
template <typename T, typename B = float>
std::vector<char> CreateConv3dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
                                          tflite::TensorType tensorType,
                                          std::vector<uint32_t> strides,
                                          std::vector<uint32_t> dilation,
                                          tflite::Padding padding,
                                          tflite::ActivationFunctionType fused_activation_function,
                                          const std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& filterTensorShape,
                                          const std::vector<int32_t>& biasTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const std::vector<T>& filterData,
                                          const std::vector<B>& biasData,
                                          const std::vector<float> biasScales = {1.0f},
                                          const std::vector<int64_t> biasOffsets = {0},
                                          const std::vector<float> filterScales = {1.0f},
                                          const std::vector<int64_t> filterOffsets = {0},
                                          float outputQuantScale = 2.0f,
                                          int outputQuantOffset = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0,
                                          int32_t depth_multiplier = 1, // Not used by CONV_3D.
                                          int32_t filterQuantizationDim = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));

    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                             sizeof(B) * biasData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));

    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(filterScales),
                                     flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
                                     tflite::QuantizationDetails_NONE,
                                     0,
                                     filterQuantizationDim);

    auto biasQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(biasScales),
                                     flatBufferBuilder.CreateVector<int64_t>(biasOffsets));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);

    auto biasTensorType = ::tflite::TensorType_FLOAT32;
    if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
    {
        biasTensorType = ::tflite::TensorType_INT32;
    }
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
                              biasTensorType,
                              2,
                              flatBufferBuilder.CreateString("bias"),
                              biasQuantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv3DOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateConv3DOptions(flatBufferBuilder,
                                                                           padding,
                                                                           strides[2], // Depth
                                                                           strides[0], // Width
                                                                           strides[1], // Height
                                                                           fused_activation_function,
                                                                           dilation[2],
                                                                           dilation[0],
                                                                           dilation[1]).Union();

    // Create operator
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> convolutionOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Convolution 3d Operator Model");

    // If using an operator with a code greater than 127, the enum value should be passed as the fifth
    // parameter rather than the second, unlike in the other builders above.
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, 0, 0, 1, tflite::BuiltinOperator_CONV_3D);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template <typename T, typename B = float>
void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode,
                       tflite::TensorType tensorType,
                       std::vector<uint32_t> strides,
                       std::vector<uint32_t> dilation,
                       tflite::Padding padding,
                       tflite::ActivationFunctionType fused_activation_function,
                       std::vector<int32_t>& inputShape,
                       std::vector<int32_t>& filterShape,
                       std::vector<int32_t>& outputShape,
                       std::vector<T>& inputValues,
                       std::vector<T>& filterValues,
                       std::vector<T>& expectedOutputValues,
                       const std::vector<int32_t>& biasShape = {},
                       const std::vector<B>& biasValues = {},
                       const std::vector<float> biasScales = {1.0f},
                       const std::vector<int64_t> biasOffsets = {0},
                       const std::vector<float> filterScales = {1.0f},
                       const std::vector<int64_t> filterOffsets = {0},
                       float outputQuantScale = 2.0f,
                       int outputQuantOffset = 0,
                       float quantScale = 1.0f,
                       int quantOffset = 0,
                       int32_t depth_multiplier = 1,
                       int32_t filterQuantizationDim = 3,
                       const std::vector<armnn::BackendId>& backends = {})
{
    using namespace delegateTestInterpreter;

    std::vector<char> modelBuffer;
    modelBuffer = CreateConv3dTfLiteModel(convolutionOperatorCode,
                                          tensorType,
                                          strides,
                                          dilation,
                                          padding,
                                          fused_activation_function,
                                          inputShape,
                                          filterShape,
                                          biasShape,
                                          outputShape,
                                          filterValues,
                                          biasValues,
                                          biasScales,
                                          biasOffsets,
                                          filterScales,
                                          filterOffsets,
                                          outputQuantScale,
                                          outputQuantOffset,
                                          quantScale,
                                          quantOffset,
                                          depth_multiplier,
                                          filterQuantizationDim);

    // Set up an interpreter using just the TFLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);

    // Compare each pair of outputs, allowing a small tolerance (final argument).
    armnnDelegate::CompareData(expectedOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 1);
    armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteOutputValues.data(), expectedOutputValues.size(), 1);
    armnnDelegate::CompareData(tfLiteOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 1);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
#endif
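
// A Convolution3dTest usage sketch (values are illustrative; real calls must sit inside
// the same ARMNN_POST_TFLITE_2_5 guard as the helpers above). Strides and dilations are
// passed as three-element vectors and unpacked inside CreateConv3dTfLiteModel; a uniform
// {1, 1, 1} is used here so the element ordering does not matter.
//
//     #if defined(ARMNN_POST_TFLITE_2_5)
//     std::vector<int32_t> inputShape  { 1, 2, 2, 2, 1 };   // [N, D, H, W, C]
//     std::vector<int32_t> filterShape { 1, 1, 1, 1, 1 };   // [D, H, W, Cin, Cout]
//     std::vector<int32_t> outputShape { 1, 2, 2, 2, 1 };
//     std::vector<float>   inputValues  { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
//     std::vector<float>   filterValues { 2.f };
//     std::vector<float>   expectedOutputValues { 2.f, 4.f, 6.f, 8.f, 10.f, 12.f, 14.f, 16.f };
//     Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
//                              tflite::TensorType_FLOAT32,
//                              { 1, 1, 1 },          // strides
//                              { 1, 1, 1 },          // dilation
//                              tflite::Padding_SAME,
//                              tflite::ActivationFunctionType_NONE,
//                              inputShape, filterShape, outputShape,
//                              inputValues, filterValues, expectedOutputValues,
//                              { 1 },                // biasShape
//                              { 0.f });             // biasValues
//     #endif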

template <typename T>
std::vector<char> CreateTransposeConvTfLiteModel(tflite::TensorType tensorType,
                                                 uint32_t strideX,
                                                 uint32_t strideY,
                                                 tflite::Padding padding,
                                                 const std::vector<int32_t>& transposeTensorShape,
                                                 const std::vector<int32_t>& filterTensorShape,
                                                 const std::vector<int32_t>& inputTensorShape,
                                                 const std::vector<int32_t>& outputTensorShape,
                                                 const std::vector<int32_t>& transposeData,
                                                 const std::vector<T>& filterData,
                                                 float filterScale = 1.0f,
                                                 int filterOffset = 0,
                                                 float outputQuantScale = 2.0f,
                                                 int outputQuantOffset = 0,
                                                 float quantScale = 1.0f,
                                                 int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(transposeData.data()),
                                                             sizeof(int32_t) * transposeData.size()));
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ filterScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ filterOffset }));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(transposeTensorShape.data(),
                                                                      transposeTensorShape.size()),
                              tflite::TensorType_INT32,
                              1);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_TransposeConvOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions =
        CreateTransposeConvOptions(flatBufferBuilder, padding, strideX, strideY).Union();

    // Create operator
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> convolutionOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: TransposeConv Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_TRANSPOSE_CONV);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
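
// Note: TFLite's TRANSPOSE_CONV takes its operands in the order
// { output_shape, weights, input }, so the runtime input sits at input index 2 of the
// model built above. That is why TransposeConvTest below fills tensor index 2 rather
// than index 0.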

template <typename T>
void TransposeConvTest(tflite::TensorType tensorType,
                       uint32_t strideX,
                       uint32_t strideY,
                       tflite::Padding padding,
                       const std::vector<int32_t>& transposeTensorShape,
                       const std::vector<int32_t>& filterTensorShape,
                       const std::vector<int32_t>& inputTensorShape,
                       const std::vector<int32_t>& outputTensorShape,
                       const std::vector<int32_t>& transposeData,
                       const std::vector<T>& filterData,
                       std::vector<T>& inputValues,
                       std::vector<T>& expectedOutputValues,
                       float filterScale = 1.0f,
                       int filterOffset = 0,
                       float outputQuantScale = 1.0f,
                       int outputQuantOffset = 0,
                       float quantScale = 1.0f,
                       int quantOffset = 0,
                       const std::vector<armnn::BackendId>& backends = {})
{
    using namespace delegateTestInterpreter;

    std::vector<char> modelBuffer;
    modelBuffer = CreateTransposeConvTfLiteModel<T>(tensorType,
                                                    strideX,
                                                    strideY,
                                                    padding,
                                                    transposeTensorShape,
                                                    filterTensorShape,
                                                    inputTensorShape,
                                                    outputTensorShape,
                                                    transposeData,
                                                    filterData,
                                                    filterScale,
                                                    filterOffset,
                                                    outputQuantScale,
                                                    outputQuantOffset,
                                                    quantScale,
                                                    quantOffset);

    // Set up an interpreter using just the TFLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
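
// A TransposeConvTest usage sketch (values are illustrative only). The transposeData
// vector carries the requested output shape, which TRANSPOSE_CONV consumes as its first
// operand; with a 1x1 filter of weight 2 and stride 1, each output equals input * 2.
//
//     std::vector<int32_t> transposeTensorShape { 4 };
//     std::vector<int32_t> transposeData { 1, 2, 2, 1 };    // requested output shape
//     std::vector<int32_t> filterShape { 1, 1, 1, 1 };
//     std::vector<int32_t> inputShape  { 1, 2, 2, 1 };
//     std::vector<int32_t> outputShape { 1, 2, 2, 1 };
//     std::vector<float>   filterValues { 2.f };
//     std::vector<float>   inputValues  { 1.f, 2.f, 3.f, 4.f };
//     std::vector<float>   expectedOutputValues { 2.f, 4.f, 6.f, 8.f };
//     TransposeConvTest<float>(tflite::TensorType_FLOAT32,
//                              1, 1,                  // strideX, strideY
//                              tflite::Padding_SAME,
//                              transposeTensorShape, filterShape,
//                              inputShape, outputShape,
//                              transposeData, filterValues,
//                              inputValues, expectedOutputValues);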

} // anonymous namespace