//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

template <typename T, typename B = float>
std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
                                          tflite::TensorType tensorType,
                                          uint32_t strideX,
                                          uint32_t strideY,
                                          uint32_t dilationX,
                                          uint32_t dilationY,
                                          tflite::Padding padding,
                                          tflite::ActivationFunctionType fused_activation_function,
                                          const std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& filterTensorShape,
                                          const std::vector<int32_t>& biasTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const std::vector<T>& filterData,
                                          const std::vector<B>& biasData,
                                          const std::vector<float> biasScales = {1.0f},
                                          const std::vector<int64_t> biasOffsets = {0},
                                          const std::vector<float> filterScales = {1.0f},
                                          const std::vector<int64_t> filterOffsets = {0},
                                          float outputQuantScale = 2.0f,
                                          int outputQuantOffset = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0,
                                          int32_t depth_multiplier = 1,
                                          int32_t filterQuantizationDim = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // TFLite requires buffer 0 to be an empty sentinel; the constant filter and bias data live in
    // buffers 2 and 3, while the variable input/output tensors reference empty buffers.
    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder);
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));

    buffers[3] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                             sizeof(B) * biasData.size()));
    buffers[4] = CreateBuffer(flatBufferBuilder);

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));

    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(filterScales),
                                     flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
                                     tflite::QuantizationDetails_NONE,
                                     0,
                                     filterQuantizationDim);

    auto biasQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(biasScales),
                                     flatBufferBuilder.CreateVector<int64_t>(biasOffsets));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);

    auto biasTensorType = ::tflite::TensorType_FLOAT32;
    if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
    {
        biasTensorType = ::tflite::TensorType_INT32;
    }
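    // Note: TFLite carries the bias of a quantized convolution as INT32, with the bias scale
    // conventionally equal to inputScale * filterScale (per channel when the filter is
    // per-channel quantized). The biasScales/biasOffsets defaults above are only placeholders
    // that individual tests override as needed.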
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
                              biasTensorType,
                              3,
                              flatBufferBuilder.CreateString("bias"),
                              biasQuantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              4,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    flatbuffers::Offset<void> operatorBuiltinOptions;
    tflite::BuiltinOptions operatorBuiltinOptionsType;

    if (convolutionOperatorCode == tflite::BuiltinOperator_DEPTHWISE_CONV_2D)
    {
        operatorBuiltinOptionsType = tflite::BuiltinOptions_DepthwiseConv2DOptions;
        operatorBuiltinOptions = CreateDepthwiseConv2DOptions(flatBufferBuilder,
                                                              padding,
                                                              strideX,
                                                              strideY,
                                                              depth_multiplier,
                                                              fused_activation_function,
                                                              dilationX,
                                                              dilationY).Union();
    }
    if (convolutionOperatorCode == tflite::BuiltinOperator_CONV_2D)
    {
        operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv2DOptions;
        operatorBuiltinOptions = CreateConv2DOptions(flatBufferBuilder,
                                                     padding,
                                                     strideX,
                                                     strideY,
                                                     fused_activation_function,
                                                     dilationX,
                                                     dilationY).Union();
    }

    // Create the operator
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> convolutionOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Convolution2d Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, convolutionOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
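
// A minimal usage sketch (hypothetical shapes and values, not taken from any existing test):
// building a float CONV_2D model with a 1x3x3x1 input, a single 2x2 filter and no activation,
// ready to be handed to the TfLite interpreter.
//
//     std::vector<int32_t> inputShape  {1, 3, 3, 1};
//     std::vector<int32_t> filterShape {1, 2, 2, 1};
//     std::vector<int32_t> biasShape   {1};
//     std::vector<int32_t> outputShape {1, 2, 2, 1};
//     std::vector<float>   filterData  {1.f, 0.f, 0.f, 1.f};
//     std::vector<float>   biasData    {0.f};
//     std::vector<char> model = CreateConv2dTfLiteModel<float>(tflite::BuiltinOperator_CONV_2D,
//                                                              tflite::TensorType_FLOAT32,
//                                                              1, 1,    // strideX, strideY
//                                                              1, 1,    // dilationX, dilationY
//                                                              tflite::Padding_VALID,
//                                                              tflite::ActivationFunctionType_NONE,
//                                                              inputShape, filterShape, biasShape, outputShape,
//                                                              filterData, biasData);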
template <typename T, typename B = float>
void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode,
                     tflite::TensorType tensorType,
                     uint32_t strideX,
                     uint32_t strideY,
                     uint32_t dilationX,
                     uint32_t dilationY,
                     tflite::Padding padding,
                     tflite::ActivationFunctionType fused_activation_function,
                     std::vector<armnn::BackendId>& backends,
                     std::vector<int32_t>& inputShape,
                     std::vector<int32_t>& filterShape,
                     std::vector<int32_t>& outputShape,
                     std::vector<T>& inputValues,
                     std::vector<T>& filterValues,
                     std::vector<T>& expectedOutputValues,
                     const std::vector<int32_t>& biasShape = {},
                     const std::vector<B>& biasValues = {},
                     const std::vector<float> biasScales = {1.0f},
                     const std::vector<int64_t> biasOffsets = {0},
                     const std::vector<float> filterScales = {1.0f},
                     const std::vector<int64_t> filterOffsets = {0},
                     float outputQuantScale = 2.0f,
                     int outputQuantOffset = 0,
                     float quantScale = 1.0f,
                     int quantOffset = 0,
                     int32_t depth_multiplier = 1,
                     int32_t filterQuantizationDim = 3)
{
    using namespace tflite;

    std::vector<char> modelBuffer;

    modelBuffer = CreateConv2dTfLiteModel(convolutionOperatorCode,
                                          tensorType,
                                          strideX,
                                          strideY,
                                          dilationX,
                                          dilationY,
                                          padding,
                                          fused_activation_function,
                                          inputShape,
                                          filterShape,
                                          biasShape,
                                          outputShape,
                                          filterValues,
                                          biasValues,
                                          biasScales,
                                          biasOffsets,
                                          filterScales,
                                          filterOffsets,
                                          outputQuantScale,
                                          outputQuantOffset,
                                          quantScale,
                                          quantOffset,
                                          depth_multiplier,
                                          filterQuantizationDim);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        tfLiteDelegateInputData[i] = inputValues[i];
    }

    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        armnnDelegateInputData[i] = inputValues[i];
    }

    // Run inference on both interpreters
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
    for (size_t i = 0; i < expectedOutputValues.size(); i++)
    {
        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
        CHECK(doctest::Approx(tfLiteDelegateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
        CHECK(doctest::Approx(armnnDelegateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
    }
}
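
// A hedged usage sketch for ConvolutionTest (illustrative values, not from a real test case):
// running a 2x2 filter that sums each window's top-left and bottom-right elements on the
// reference CPU backend and checking the delegate against the built-in TfLite kernels.
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> inputShape  {1, 3, 3, 1};
//     std::vector<int32_t> filterShape {1, 2, 2, 1};
//     std::vector<int32_t> biasShape   {1};
//     std::vector<int32_t> outputShape {1, 2, 2, 1};
//     std::vector<float> inputValues  {1, 2, 3, 4, 5, 6, 7, 8, 9};
//     std::vector<float> filterValues {1, 0, 0, 1};
//     std::vector<float> biasValues   {0};
//     std::vector<float> expectedOutputValues {6, 8, 12, 14};   // top-left + bottom-right
//     ConvolutionTest<float>(tflite::BuiltinOperator_CONV_2D,
//                            tflite::TensorType_FLOAT32,
//                            1, 1,    // strideX, strideY
//                            1, 1,    // dilationX, dilationY
//                            tflite::Padding_VALID,
//                            tflite::ActivationFunctionType_NONE,
//                            backends,
//                            inputShape, filterShape, outputShape,
//                            inputValues, filterValues, expectedOutputValues,
//                            biasShape, biasValues);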
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
template <typename T, typename B = float>
std::vector<char> CreateConv3dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
                                          tflite::TensorType tensorType,
                                          std::vector<uint32_t> strides,
                                          std::vector<uint32_t> dilation,
                                          tflite::Padding padding,
                                          tflite::ActivationFunctionType fused_activation_function,
                                          const std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& filterTensorShape,
                                          const std::vector<int32_t>& biasTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const std::vector<T>& filterData,
                                          const std::vector<B>& biasData,
                                          const std::vector<float> biasScales = {1.0f},
                                          const std::vector<int64_t> biasOffsets = {0},
                                          const std::vector<float> filterScales = {1.0f},
                                          const std::vector<int64_t> filterOffsets = {0},
                                          float outputQuantScale = 2.0f,
                                          int outputQuantOffset = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0,
                                          int32_t depth_multiplier = 1,
                                          int32_t filterQuantizationDim = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));

    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                             sizeof(B) * biasData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));

    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(filterScales),
                                     flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
                                     tflite::QuantizationDetails_NONE,
                                     0,
                                     filterQuantizationDim);

    auto biasQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(biasScales),
                                     flatBufferBuilder.CreateVector<int64_t>(biasOffsets));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);

    auto biasTensorType = ::tflite::TensorType_FLOAT32;
    if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
    {
        biasTensorType = ::tflite::TensorType_INT32;
    }
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
                              biasTensorType,
                              2,
                              flatBufferBuilder.CreateString("bias"),
                              biasQuantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv3DOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateConv3DOptions(flatBufferBuilder,
                                                                           padding,
                                                                           strides[2], // Depth
                                                                           strides[0], // Width
                                                                           strides[1], // Height
                                                                           fused_activation_function,
                                                                           dilation[2],
                                                                           dilation[0],
                                                                           dilation[1]).Union();

    // Create operator
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> convolutionOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Convolution 3d Operator Model");

    // If using an operator with a code greater than 127 then the enum value should be passed as the fifth
    // parameter rather than the second like in other tests.
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, 0, 0, 1, tflite::BuiltinOperator_CONV_3D);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
template <typename T, typename B = float>
void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode,
                       tflite::TensorType tensorType,
                       std::vector<uint32_t> strides,
                       std::vector<uint32_t> dilation,
                       tflite::Padding padding,
                       tflite::ActivationFunctionType fused_activation_function,
                       std::vector<armnn::BackendId>& backends,
                       std::vector<int32_t>& inputShape,
                       std::vector<int32_t>& filterShape,
                       std::vector<int32_t>& outputShape,
                       std::vector<T>& inputValues,
                       std::vector<T>& filterValues,
                       std::vector<T>& expectedOutputValues,
                       const std::vector<int32_t>& biasShape = {},
                       const std::vector<B>& biasValues = {},
                       const std::vector<float> biasScales = {1.0f},
                       const std::vector<int64_t> biasOffsets = {0},
                       const std::vector<float> filterScales = {1.0f},
                       const std::vector<int64_t> filterOffsets = {0},
                       float outputQuantScale = 2.0f,
                       int outputQuantOffset = 0,
                       float quantScale = 1.0f,
                       int quantOffset = 0,
                       int32_t depth_multiplier = 1,
                       int32_t filterQuantizationDim = 3)
{
    using namespace tflite;

    std::vector<char> modelBuffer;
    modelBuffer = CreateConv3dTfLiteModel(convolutionOperatorCode,
                                          tensorType,
                                          strides,
                                          dilation,
                                          padding,
                                          fused_activation_function,
                                          inputShape,
                                          filterShape,
                                          biasShape,
                                          outputShape,
                                          filterValues,
                                          biasValues,
                                          biasScales,
                                          biasOffsets,
                                          filterScales,
                                          filterOffsets,
                                          outputQuantScale,
                                          outputQuantOffset,
                                          quantScale,
                                          quantOffset,
                                          depth_multiplier,
                                          filterQuantizationDim);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);

    // Run inference on both interpreters
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data. Note that the outputs are read back as float, so this helper
    // currently assumes a float output tensor despite being templated on T.
    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);

    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size(), 1);
    armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData, expectedOutputValues.size(), 1);
    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size(), 1);
}
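
// A hedged usage sketch for Convolution3dTest (illustrative values only, not from a real test).
// Strides and dilation are passed as {width, height, depth} vectors, which the model builder
// reorders into the depth, width, height order that CreateConv3DOptions expects.
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> inputShape  {1, 2, 2, 2, 1};   // NDHWC
//     std::vector<int32_t> filterShape {2, 2, 2, 1, 1};
//     std::vector<int32_t> biasShape   {1};
//     std::vector<int32_t> outputShape {1, 1, 1, 1, 1};
//     std::vector<float> inputValues(8, 1.0f);
//     std::vector<float> filterValues(8, 1.0f);
//     std::vector<float> biasValues {0.0f};
//     std::vector<float> expectedOutputValues {8.0f};      // sum of the 2x2x2 all-ones window
//     Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
//                              tflite::TensorType_FLOAT32,
//                              {1, 1, 1},    // strides
//                              {1, 1, 1},    // dilation
//                              tflite::Padding_VALID,
//                              tflite::ActivationFunctionType_NONE,
//                              backends,
//                              inputShape, filterShape, outputShape,
//                              inputValues, filterValues, expectedOutputValues,
//                              biasShape, biasValues);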
#endif

template <typename T>
std::vector<char> CreateTransposeConvTfLiteModel(tflite::TensorType tensorType,
                                                 uint32_t strideX,
                                                 uint32_t strideY,
                                                 tflite::Padding padding,
                                                 const std::vector<int32_t>& transposeTensorShape,
                                                 const std::vector<int32_t>& filterTensorShape,
                                                 const std::vector<int32_t>& inputTensorShape,
                                                 const std::vector<int32_t>& outputTensorShape,
                                                 const std::vector<int32_t>& transposeData,
                                                 const std::vector<T>& filterData,
                                                 float filterScale = 1.0f,
                                                 int filterOffset = 0,
                                                 float outputQuantScale = 2.0f,
                                                 int outputQuantOffset = 0,
                                                 float quantScale = 1.0f,
                                                 int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(transposeData.data()),
                                                             sizeof(int32_t) * transposeData.size()));
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ filterScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ filterOffset }));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(transposeTensorShape.data(),
                                                                      transposeTensorShape.size()),
                              tflite::TensorType_INT32,
                              1);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_TransposeConvOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions =
        CreateTransposeConvOptions(flatBufferBuilder, padding, strideX, strideY).Union();

    // Create the operator
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> convolutionOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: TransposeConv Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_TRANSPOSE_CONV);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

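// TFLite's TRANSPOSE_CONV operator takes its inputs in the order {output shape, filter, input},
// which is why the model above registers the input tensor at index 2 and why the test below
// writes input values through interpreter->inputs()[2] rather than inputs()[0].
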
template <typename T>
void TransposeConvTest(std::vector<armnn::BackendId>& backends,
                       tflite::TensorType tensorType,
                       uint32_t strideX,
                       uint32_t strideY,
                       tflite::Padding padding,
                       const std::vector<int32_t>& transposeTensorShape,
                       const std::vector<int32_t>& filterTensorShape,
                       const std::vector<int32_t>& inputTensorShape,
                       const std::vector<int32_t>& outputTensorShape,
                       const std::vector<int32_t>& transposeData,
                       const std::vector<T>& filterData,
                       std::vector<T>& inputValues,
                       std::vector<T>& expectedOutputValues,
                       float filterScale = 1.0f,
                       int filterOffset = 0,
                       float outputQuantScale = 1.0f,
                       int outputQuantOffset = 0,
                       float quantScale = 1.0f,
                       int quantOffset = 0)
{
    using namespace tflite;

    std::vector<char> modelBuffer;
    modelBuffer = CreateTransposeConvTfLiteModel<T>(tensorType,
                                                    strideX,
                                                    strideY,
                                                    padding,
                                                    transposeTensorShape,
                                                    filterTensorShape,
                                                    inputTensorShape,
                                                    outputTensorShape,
                                                    transposeData,
                                                    filterData,
                                                    filterScale,
                                                    filterOffset,
                                                    outputQuantScale,
                                                    outputQuantOffset,
                                                    quantScale,
                                                    quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data: the input tensor is the third operator input (see the note above)
    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[2];
    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        tfLiteDelegateInputData[i] = inputValues[i];
    }

    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[2];
    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        armnnDelegateInputData[i] = inputValues[i];
    }

    // Run inference on both interpreters
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
    for (size_t i = 0; i < expectedOutputValues.size(); i++)
    {
        CHECK(armnnDelegateOutputData[i] == expectedOutputValues[i]);
        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
    }
}
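
// A hedged usage sketch for TransposeConvTest (illustrative values only): a 1x1 identity filter
// with stride 1 passes the input straight through, so the expected output equals the input.
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> transposeTensorShape {4};           // rank of the output-shape tensor
//     std::vector<int32_t> transposeData {1, 2, 2, 1};         // requested output shape
//     std::vector<int32_t> filterShape {1, 1, 1, 1};
//     std::vector<int32_t> inputShape  {1, 2, 2, 1};
//     std::vector<int32_t> outputShape {1, 2, 2, 1};
//     std::vector<float> filterData {1.0f};
//     std::vector<float> inputValues {1.0f, 2.0f, 3.0f, 4.0f};
//     std::vector<float> expectedOutputValues {1.0f, 2.0f, 3.0f, 4.0f};
//     TransposeConvTest<float>(backends,
//                              tflite::TensorType_FLOAT32,
//                              1, 1,    // strideX, strideY
//                              tflite::Padding_VALID,
//                              transposeTensorShape, filterShape, inputShape, outputShape,
//                              transposeData, filterData, inputValues, expectedOutputValues);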

} // anonymous namespace