//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

template <typename T, typename B = float>
std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
                                          tflite::TensorType tensorType,
                                          uint32_t strideX,
                                          uint32_t strideY,
                                          uint32_t dilationX,
                                          uint32_t dilationY,
                                          tflite::Padding padding,
                                          tflite::ActivationFunctionType fused_activation_function,
                                          const std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& filterTensorShape,
                                          const std::vector<int32_t>& biasTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const std::vector<T>& filterData,
                                          const std::vector<B>& biasData,
                                          const std::vector<float> biasScales = {1.0f},
                                          const std::vector<int64_t> biasOffsets = {0},
                                          const std::vector<float> filterScales = {1.0f},
                                          const std::vector<int64_t> filterOffsets = {0},
                                          float outputQuantScale = 2.0f,
                                          int outputQuantOffset = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0,
                                          int32_t depth_multiplier = 1,
                                          int32_t filterQuantizationDim = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // Per the TFLite schema, buffer 0 must always be empty; the filter and bias constants
    // live in buffers 2 and 3, while input and output reference empty buffers.
    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder);
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));
    buffers[3] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                             sizeof(B) * biasData.size()));
    buffers[4] = CreateBuffer(flatBufferBuilder);

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));

    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(filterScales),
                                     flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
                                     tflite::QuantizationDetails_NONE,
                                     0,
                                     filterQuantizationDim);

    auto biasQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(biasScales),
                                     flatBufferBuilder.CreateVector<int64_t>(biasOffsets));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);

    // Bias is float for float models and int32 for quantized models.
    auto biasTensorType = ::tflite::TensorType_FLOAT32;
    if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
    {
        biasTensorType = ::tflite::TensorType_INT32;
    }
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
                              biasTensorType,
                              3,
                              flatBufferBuilder.CreateString("bias"),
                              biasQuantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              4,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    flatbuffers::Offset<void> operatorBuiltinOptions;
    tflite::BuiltinOptions operatorBuiltinOptionsType;

    if (convolutionOperatorCode == tflite::BuiltinOperator_DEPTHWISE_CONV_2D)
    {
        operatorBuiltinOptionsType = tflite::BuiltinOptions_DepthwiseConv2DOptions;
        operatorBuiltinOptions = CreateDepthwiseConv2DOptions(flatBufferBuilder,
                                                              padding,
                                                              strideX,
                                                              strideY,
                                                              depth_multiplier,
                                                              fused_activation_function,
                                                              dilationX,
                                                              dilationY).Union();
    }
    else if (convolutionOperatorCode == tflite::BuiltinOperator_CONV_2D)
    {
        operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv2DOptions;
        operatorBuiltinOptions = CreateConv2DOptions(flatBufferBuilder,
                                                     padding,
                                                     strideX,
                                                     strideY,
                                                     fused_activation_function,
                                                     dilationX,
                                                     dilationY).Union();
    }

    // Create operator
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> convolutionOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Convolution2d Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, convolutionOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
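
// A minimal sketch of how the builder above can be driven (the values here are hypothetical and
// purely illustrative; real test cases supply their own shapes, data and quantization settings):
//
//     std::vector<int32_t> inputShape  { 1, 5, 5, 1 };
//     std::vector<int32_t> filterShape { 1, 3, 3, 1 };
//     std::vector<int32_t> biasShape   { 1 };
//     std::vector<int32_t> outputShape { 1, 3, 3, 1 };
//     std::vector<float>   filterData(9, 1.0f);
//     std::vector<float>   biasData   { 0.0f };
//     std::vector<char> modelBuffer =
//         CreateConv2dTfLiteModel<float>(tflite::BuiltinOperator_CONV_2D,
//                                        tflite::TensorType_FLOAT32,
//                                        1, 1,                  // strideX, strideY
//                                        1, 1,                  // dilationX, dilationY
//                                        tflite::Padding_VALID,
//                                        tflite::ActivationFunctionType_NONE,
//                                        inputShape, filterShape, biasShape, outputShape,
//                                        filterData, biasData);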

template <typename T, typename B = float>
void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode,
                     tflite::TensorType tensorType,
                     uint32_t strideX,
                     uint32_t strideY,
                     uint32_t dilationX,
                     uint32_t dilationY,
                     tflite::Padding padding,
                     tflite::ActivationFunctionType fused_activation_function,
                     std::vector<int32_t>& inputShape,
                     std::vector<int32_t>& filterShape,
                     std::vector<int32_t>& outputShape,
                     std::vector<T>& inputValues,
                     std::vector<T>& filterValues,
                     std::vector<T>& expectedOutputValues,
                     const std::vector<int32_t>& biasShape = {},
                     const std::vector<B>& biasValues = {},
                     const std::vector<float> biasScales = {1.0f},
                     const std::vector<int64_t> biasOffsets = {0},
                     const std::vector<float> filterScales = {1.0f},
                     const std::vector<int64_t> filterOffsets = {0},
                     float outputQuantScale = 2.0f,
                     int outputQuantOffset = 0,
                     float quantScale = 1.0f,
                     int quantOffset = 0,
                     int32_t depth_multiplier = 1,
                     int32_t filterQuantizationDim = 3,
                     const std::vector<armnn::BackendId>& backends = {})
{
    using namespace delegateTestInterpreter;

    std::vector<char> modelBuffer;
    modelBuffer = CreateConv2dTfLiteModel(convolutionOperatorCode,
                                          tensorType,
                                          strideX,
                                          strideY,
                                          dilationX,
                                          dilationY,
                                          padding,
                                          fused_activation_function,
                                          inputShape,
                                          filterShape,
                                          biasShape,
                                          outputShape,
                                          filterValues,
                                          biasValues,
                                          biasScales,
                                          biasOffsets,
                                          filterScales,
                                          filterOffsets,
                                          outputQuantScale,
                                          outputQuantOffset,
                                          quantScale,
                                          quantOffset,
                                          depth_multiplier,
                                          filterQuantizationDim);

    // Set up an interpreter with just the TFLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
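
// A hedged usage sketch for the helper above (values are hypothetical; the real cases live in
// the Convolution2d/DepthwiseConvolution2d test files). A 4x4 input of ones convolved with a
// 2x2 kernel of ones and zero bias gives a 3x3 output of fours, checked on both runtimes:
//
//     std::vector<int32_t> inputShape  { 1, 4, 4, 1 };
//     std::vector<int32_t> filterShape { 1, 2, 2, 1 };
//     std::vector<int32_t> outputShape { 1, 3, 3, 1 };
//     std::vector<float> inputValues(16, 1.0f);
//     std::vector<float> filterValues(4, 1.0f);
//     std::vector<float> expectedOutputValues(9, 4.0f);
//     ConvolutionTest<float>(tflite::BuiltinOperator_CONV_2D,
//                            tflite::TensorType_FLOAT32,
//                            1, 1,                  // strideX, strideY
//                            1, 1,                  // dilationX, dilationY
//                            tflite::Padding_VALID,
//                            tflite::ActivationFunctionType_NONE,
//                            inputShape, filterShape, outputShape,
//                            inputValues, filterValues, expectedOutputValues,
//                            { 1 },                 // biasShape
//                            { 0.0f });             // biasValues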

// Conv3d is correctly supported for external delegates only from TF Lite v2.6 onwards,
// as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
template <typename T, typename B = float>
std::vector<char> CreateConv3dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
                                          tflite::TensorType tensorType,
                                          std::vector<uint32_t> strides,
                                          std::vector<uint32_t> dilation,
                                          tflite::Padding padding,
                                          tflite::ActivationFunctionType fused_activation_function,
                                          const std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& filterTensorShape,
                                          const std::vector<int32_t>& biasTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const std::vector<T>& filterData,
                                          const std::vector<B>& biasData,
                                          const std::vector<float> biasScales = {1.0f},
                                          const std::vector<int64_t> biasOffsets = {0},
                                          const std::vector<float> filterScales = {1.0f},
                                          const std::vector<int64_t> filterOffsets = {0},
                                          float outputQuantScale = 2.0f,
                                          int outputQuantOffset = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0,
                                          int32_t depth_multiplier = 1,
                                          int32_t filterQuantizationDim = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                             sizeof(B) * biasData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));

    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(filterScales),
                                     flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
                                     tflite::QuantizationDetails_NONE,
                                     0,
                                     filterQuantizationDim);

    auto biasQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(biasScales),
                                     flatBufferBuilder.CreateVector<int64_t>(biasOffsets));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);

    // Bias is float for float models and int32 for quantized models.
    auto biasTensorType = ::tflite::TensorType_FLOAT32;
    if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
    {
        biasTensorType = ::tflite::TensorType_INT32;
    }
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
                              biasTensorType,
                              2,
                              flatBufferBuilder.CreateString("bias"),
                              biasQuantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv3DOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateConv3DOptions(flatBufferBuilder,
                                                                           padding,
                                                                           strides[2],  // Depth
                                                                           strides[0],  // Width
                                                                           strides[1],  // Height
                                                                           fused_activation_function,
                                                                           dilation[2], // Depth
                                                                           dilation[0], // Width
                                                                           dilation[1]  // Height
                                                                           ).Union();

    // Create operator
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> convolutionOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Convolution 3d Operator Model");

    // If using an operator with a code greater than 127, the enum value should be passed as the
    // fifth parameter rather than the second, as in the other tests.
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, 0, 0, 1, tflite::BuiltinOperator_CONV_3D);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
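
// Parameter-packing note, derived from the unpacking above: the stride and dilation vectors are
// read as { width, height, depth }, with element [2] feeding the depth field of Conv3DOptions.
// A hypothetical, illustrative builder call for a unit-stride float model:
//
//     std::vector<uint32_t> strides  { 1, 1, 1 };   // { W, H, D }
//     std::vector<uint32_t> dilation { 1, 1, 1 };   // { W, H, D }
//     std::vector<float> filterData(8, 1.0f);       // 2x2x2 kernel, 1 in / 1 out channel
//     std::vector<float> biasData { 0.0f };
//     std::vector<char> modelBuffer =
//         CreateConv3dTfLiteModel<float>(tflite::BuiltinOperator_CONV_3D,
//                                        tflite::TensorType_FLOAT32,
//                                        strides, dilation,
//                                        tflite::Padding_VALID,
//                                        tflite::ActivationFunctionType_NONE,
//                                        { 1, 2, 3, 3, 1 },    // input shape, NDHWC
//                                        { 2, 2, 2, 1, 1 },    // filter shape, DHWIO
//                                        { 1 },                // bias shape
//                                        { 1, 1, 2, 2, 1 },    // output shape
//                                        filterData, biasData);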

template <typename T, typename B = float>
void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode,
                       tflite::TensorType tensorType,
                       std::vector<uint32_t> strides,
                       std::vector<uint32_t> dilation,
                       tflite::Padding padding,
                       tflite::ActivationFunctionType fused_activation_function,
                       std::vector<int32_t>& inputShape,
                       std::vector<int32_t>& filterShape,
                       std::vector<int32_t>& outputShape,
                       std::vector<T>& inputValues,
                       std::vector<T>& filterValues,
                       std::vector<T>& expectedOutputValues,
                       const std::vector<int32_t>& biasShape = {},
                       const std::vector<B>& biasValues = {},
                       const std::vector<float> biasScales = {1.0f},
                       const std::vector<int64_t> biasOffsets = {0},
                       const std::vector<float> filterScales = {1.0f},
                       const std::vector<int64_t> filterOffsets = {0},
                       float outputQuantScale = 2.0f,
                       int outputQuantOffset = 0,
                       float quantScale = 1.0f,
                       int quantOffset = 0,
                       int32_t depth_multiplier = 1,
                       int32_t filterQuantizationDim = 3,
                       const std::vector<armnn::BackendId>& backends = {})
{
    using namespace delegateTestInterpreter;

    std::vector<char> modelBuffer;
    modelBuffer = CreateConv3dTfLiteModel(convolutionOperatorCode,
                                          tensorType,
                                          strides,
                                          dilation,
                                          padding,
                                          fused_activation_function,
                                          inputShape,
                                          filterShape,
                                          biasShape,
                                          outputShape,
                                          filterValues,
                                          biasValues,
                                          biasScales,
                                          biasOffsets,
                                          filterScales,
                                          filterOffsets,
                                          outputQuantScale,
                                          outputQuantOffset,
                                          quantScale,
                                          quantOffset,
                                          depth_multiplier,
                                          filterQuantizationDim);

    // Set up an interpreter with just the TFLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);

    // Compare both runtimes against the expected values and each other, allowing a small tolerance.
    armnnDelegate::CompareData(expectedOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 1);
    armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteOutputValues.data(), expectedOutputValues.size(), 1);
    armnnDelegate::CompareData(tfLiteOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 1);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
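
// A hedged end-to-end sketch for Convolution3dTest (hypothetical values; real cases live in the
// Convolution3d test file). A 2x2x2 volume of ones against a 2x2x2 kernel of 0.5s sums to 4:
//
//     std::vector<uint32_t> strides  { 1, 1, 1 };
//     std::vector<uint32_t> dilation { 1, 1, 1 };
//     std::vector<int32_t> inputShape  { 1, 2, 2, 2, 1 };
//     std::vector<int32_t> filterShape { 2, 2, 2, 1, 1 };
//     std::vector<int32_t> outputShape { 1, 1, 1, 1, 1 };
//     std::vector<float> inputValues(8, 1.0f);
//     std::vector<float> filterValues(8, 0.5f);
//     std::vector<float> expectedOutputValues { 4.0f };
//     Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
//                              tflite::TensorType_FLOAT32,
//                              strides, dilation,
//                              tflite::Padding_VALID,
//                              tflite::ActivationFunctionType_NONE,
//                              inputShape, filterShape, outputShape,
//                              inputValues, filterValues, expectedOutputValues,
//                              { 1 },       // biasShape
//                              { 0.0f });   // biasValues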
#endif

template <typename T>
std::vector<char> CreateTransposeConvTfLiteModel(tflite::TensorType tensorType,
                                                 uint32_t strideX,
                                                 uint32_t strideY,
                                                 tflite::Padding padding,
                                                 const std::vector<int32_t>& transposeTensorShape,
                                                 const std::vector<int32_t>& filterTensorShape,
                                                 const std::vector<int32_t>& inputTensorShape,
                                                 const std::vector<int32_t>& outputTensorShape,
                                                 const std::vector<int32_t>& transposeData,
                                                 const std::vector<T>& filterData,
                                                 float filterScale = 1.0f,
                                                 int filterOffset = 0,
                                                 float outputQuantScale = 2.0f,
                                                 int outputQuantOffset = 0,
                                                 float quantScale = 1.0f,
                                                 int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(transposeData.data()),
                                                             sizeof(int32_t) * transposeData.size()));
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ filterScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ filterOffset }));

    // TRANSPOSE_CONV inputs are ordered { output shape, filter, input }.
    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(transposeTensorShape.data(),
                                                                      transposeTensorShape.size()),
                              tflite::TensorType_INT32,
                              1);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_TransposeConvOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions =
        CreateTransposeConvOptions(flatBufferBuilder, padding, strideX, strideY).Union();

    // Create operator
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> convolutionOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: TransposeConv Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_TRANSPOSE_CONV);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template <typename T>
void TransposeConvTest(tflite::TensorType tensorType,
                       uint32_t strideX,
                       uint32_t strideY,
                       tflite::Padding padding,
                       const std::vector<int32_t>& transposeTensorShape,
                       const std::vector<int32_t>& filterTensorShape,
                       const std::vector<int32_t>& inputTensorShape,
                       const std::vector<int32_t>& outputTensorShape,
                       const std::vector<int32_t>& transposeData,
                       const std::vector<T>& filterData,
                       std::vector<T>& inputValues,
                       std::vector<T>& expectedOutputValues,
                       float filterScale = 1.0f,
                       int filterOffset = 0,
                       float outputQuantScale = 1.0f,
                       int outputQuantOffset = 0,
                       float quantScale = 1.0f,
                       int quantOffset = 0,
                       const std::vector<armnn::BackendId>& backends = {})
{
    using namespace delegateTestInterpreter;

    std::vector<char> modelBuffer;
    modelBuffer = CreateTransposeConvTfLiteModel<T>(tensorType,
                                                    strideX,
                                                    strideY,
                                                    padding,
                                                    transposeTensorShape,
                                                    filterTensorShape,
                                                    inputTensorShape,
                                                    outputTensorShape,
                                                    transposeData,
                                                    filterData,
                                                    filterScale,
                                                    filterOffset,
                                                    outputQuantScale,
                                                    outputQuantOffset,
                                                    quantScale,
                                                    quantOffset);

    // Set up an interpreter with just the TFLite runtime. The network input is tensor 2,
    // as TRANSPOSE_CONV's first two inputs are the output shape and the filter.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
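
// Note the input index used above: TRANSPOSE_CONV takes { output shape, filter, input } as its
// three operands, so the network input is tensor 2. A hedged, hypothetical call; upsampling a
// 2x2 plane of ones with a 2x2 kernel of ones scatters to { 1, 2, 1, 2, 4, 2, 1, 2, 1 }:
//
//     std::vector<int32_t> transposeShape { 4 };
//     std::vector<int32_t> filterShape { 1, 2, 2, 1 };
//     std::vector<int32_t> inputShape  { 1, 2, 2, 1 };
//     std::vector<int32_t> outputShape { 1, 3, 3, 1 };
//     std::vector<int32_t> transposeData { 1, 3, 3, 1 };   // requested output shape
//     std::vector<float> filterData(4, 1.0f);
//     std::vector<float> inputValues(4, 1.0f);
//     std::vector<float> expectedOutputValues { 1, 2, 1, 2, 4, 2, 1, 2, 1 };
//     TransposeConvTest<float>(tflite::TensorType_FLOAT32,
//                              1, 1,                       // strideX, strideY
//                              tflite::Padding_VALID,
//                              transposeShape, filterShape, inputShape, outputShape,
//                              transposeData, filterData, inputValues, expectedOutputValues);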

} // anonymous namespace