//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

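// Builds a FlatBuffer model in memory containing a single CONV_2D or
// DEPTHWISE_CONV_2D operator with constant filter and bias tensors.
// T is the input/filter data type; B is the bias data type (float by default).
// The filter and bias take per-channel quantization parameters.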
template <typename T, typename B = float>
std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
                                          tflite::TensorType tensorType,
                                          uint32_t strideX,
                                          uint32_t strideY,
                                          uint32_t dilationX,
                                          uint32_t dilationY,
                                          tflite::Padding padding,
                                          tflite::ActivationFunctionType fused_activation_function,
                                          const std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& filterTensorShape,
                                          const std::vector<int32_t>& biasTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const std::vector<T>& filterData,
                                          const std::vector<B>& biasData,
                                          const std::vector<float> biasScales = {1.0f},
                                          const std::vector<int64_t> biasOffsets = {0},
                                          const std::vector<float> filterScales = {1.0f},
                                          const std::vector<int64_t> filterOffsets = {0},
                                          float outputQuantScale = 2.0f,
                                          int outputQuantOffset = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0,
                                          int32_t depth_multiplier = 1,
                                          int32_t filterQuantizationDim = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

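    // Buffer 0 is the empty buffer reserved by the TfLite schema; buffers 1
    // and 2 hold the constant filter and bias data respectively.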
    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                             sizeof(B) * biasData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));

    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(filterScales),
                                     flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
                                     tflite::QuantizationDetails_NONE,
                                     0,
                                     filterQuantizationDim);

    auto biasQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>(biasScales),
                                     flatBufferBuilder.CreateVector<int64_t>(biasOffsets));

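    // Tensor order: 0 = input, 1 = filter, 2 = bias, 3 = output. Constant
    // tensors reference their data buffers by index; variable tensors use
    // the empty buffer 0.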
    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);

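    // Quantized convolutions accumulate into 32-bit integers, so the bias
    // tensor is promoted to INT32 when the input/filter type is INT8 or UINT8.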
    auto biasTensorType = ::tflite::TensorType_FLOAT32;
    if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
    {
        biasTensorType = ::tflite::TensorType_INT32;
    }
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
                              biasTensorType,
                              2,
                              flatBufferBuilder.CreateString("bias"),
                              biasQuantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    // Select the builtin options matching the requested operator. The type is
    // initialised to NONE so an unsupported operator code cannot leave it
    // holding an indeterminate value.
    flatbuffers::Offset<void> operatorBuiltinOptions;
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;

    if (convolutionOperatorCode == tflite::BuiltinOperator_DEPTHWISE_CONV_2D)
    {
        operatorBuiltinOptionsType = tflite::BuiltinOptions_DepthwiseConv2DOptions;
        operatorBuiltinOptions = CreateDepthwiseConv2DOptions(flatBufferBuilder,
                                                              padding,
                                                              strideX,
                                                              strideY,
                                                              depth_multiplier,
                                                              fused_activation_function,
                                                              dilationX,
                                                              dilationY).Union();
    }
    else if (convolutionOperatorCode == tflite::BuiltinOperator_CONV_2D)
    {
        operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv2DOptions;
        operatorBuiltinOptions = CreateConv2DOptions(flatBufferBuilder,
                                                     padding,
                                                     strideX,
                                                     strideY,
                                                     fused_activation_function,
                                                     dilationX,
                                                     dilationY).Union();
    }

    // Create the operator
    const std::vector<int> operatorInputs{0, 1, 2};
    const std::vector<int> operatorOutputs{3};
    flatbuffers::Offset<Operator> convolutionOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{3};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Convolution2d Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, convolutionOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

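// Builds the convolution model above and runs it twice: once through the
// ArmNN delegate and once on the reference TfLite CPU kernels. Both runs are
// checked against each other and against the expected output values.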
template <typename T, typename B = float>
void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode,
                     tflite::TensorType tensorType,
                     uint32_t strideX,
                     uint32_t strideY,
                     uint32_t dilationX,
                     uint32_t dilationY,
                     tflite::Padding padding,
                     tflite::ActivationFunctionType fused_activation_function,
                     std::vector<armnn::BackendId>& backends,
                     std::vector<int32_t>& inputShape,
                     std::vector<int32_t>& filterShape,
                     std::vector<int32_t>& outputShape,
                     std::vector<T>& inputValues,
                     std::vector<T>& filterValues,
                     std::vector<T>& expectedOutputValues,
                     const std::vector<int32_t>& biasShape = {},
                     const std::vector<B>& biasValues = {},
                     const std::vector<float> biasScales = {1.0f},
                     const std::vector<int64_t> biasOffsets = {0},
                     const std::vector<float> filterScales = {1.0f},
                     const std::vector<int64_t> filterOffsets = {0},
                     float outputQuantScale = 2.0f,
                     int outputQuantOffset = 0,
                     float quantScale = 1.0f,
                     int quantOffset = 0,
                     int32_t depth_multiplier = 1,
                     int32_t filterQuantizationDim = 3)
{
    using namespace tflite;

    std::vector<char> modelBuffer;
    modelBuffer = CreateConv2dTfLiteModel(convolutionOperatorCode,
                                          tensorType,
                                          strideX,
                                          strideY,
                                          dilationX,
                                          dilationY,
                                          padding,
                                          fused_activation_function,
                                          inputShape,
                                          filterShape,
                                          biasShape,
                                          outputShape,
                                          filterValues,
                                          biasValues,
                                          biasScales,
                                          biasOffsets,
                                          filterScales,
                                          filterOffsets,
                                          outputQuantScale,
                                          outputQuantOffset,
                                          quantScale,
                                          quantOffset,
                                          depth_multiplier,
                                          filterQuantizationDim);

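    // Parse the FlatBuffer and build two interpreters from it: one will be
    // modified to run through the ArmNN delegate, the other stays on the
    // reference TfLite kernels so the two outputs can be compared.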
    const Model* tfLiteModel = GetModel(modelBuffer.data());
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);
    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        tfLiteDelegateInputData[i] = inputValues[i];
    }

    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        armnnDelegateInputData[i] = inputValues[i];
    }

    // Run inference on both interpreters
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
    for (size_t i = 0; i < expectedOutputValues.size(); i++)
    {
        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
        CHECK(doctest::Approx(tfLiteDelegateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
        CHECK(doctest::Approx(armnnDelegateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
    }
}

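// Builds a FlatBuffer model containing a single TRANSPOSE_CONV operator. The
// operator takes three inputs: a constant INT32 output-shape tensor, a
// constant filter tensor, and the input tensor to be convolved.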
template <typename T>
std::vector<char> CreateTransposeConvTfLiteModel(tflite::TensorType tensorType,
                                                 uint32_t strideX,
                                                 uint32_t strideY,
                                                 tflite::Padding padding,
                                                 const std::vector<int32_t>& transposeTensorShape,
                                                 const std::vector<int32_t>& filterTensorShape,
                                                 const std::vector<int32_t>& inputTensorShape,
                                                 const std::vector<int32_t>& outputTensorShape,
                                                 const std::vector<int32_t>& transposeData,
                                                 const std::vector<T>& filterData,
                                                 float filterScale = 1.0f,
                                                 int filterOffset = 0,
                                                 float outputQuantScale = 2.0f,
                                                 int outputQuantOffset = 0,
                                                 float quantScale = 1.0f,
                                                 int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

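    // Buffer 0 is the schema's empty buffer; buffer 1 holds the constant
    // output-shape data and buffer 2 the constant filter data.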
    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(transposeData.data()),
                                                             sizeof(int32_t) * transposeData.size()));
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                             sizeof(T) * filterData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
    auto outputQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
    auto filterQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ filterScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ filterOffset }));

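    // Tensor order matches the TRANSPOSE_CONV input convention:
    // 0 = output shape (INT32), 1 = filter, 2 = input, 3 = output.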
    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(transposeTensorShape.data(),
                                                                      transposeTensorShape.size()),
                              tflite::TensorType_INT32,
                              1);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                      filterTensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("filter"),
                              filterQuantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              outputQuantizationParameters);

    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_TransposeConvOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions =
        CreateTransposeConvOptions(flatBufferBuilder, padding, strideX, strideY).Union();

385 // create operator
Keith Davis892fafe2020-11-26 17:40:35 +0000386 const std::vector<int> operatorInputs{0, 1, 2};
387 const std::vector<int> operatorOutputs{3};
Sadik Armagan32ca1442020-11-13 17:51:56 +0000388 flatbuffers::Offset <Operator> convolutionOperator =
389 CreateOperator(flatBufferBuilder,
390 0,
391 flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
392 flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
393 operatorBuiltinOptionsType,
394 operatorBuiltinOptions);
395
Keith Davis892fafe2020-11-26 17:40:35 +0000396 const std::vector<int> subgraphInputs{0, 1, 2};
397 const std::vector<int> subgraphOutputs{3};
Sadik Armagan32ca1442020-11-13 17:51:56 +0000398 flatbuffers::Offset <SubGraph> subgraph =
399 CreateSubGraph(flatBufferBuilder,
400 flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
401 flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
402 flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
403 flatBufferBuilder.CreateVector(&convolutionOperator, 1));
404
405 flatbuffers::Offset <flatbuffers::String> modelDescription =
406 flatBufferBuilder.CreateString("ArmnnDelegate: TransposeConv Operator Model");
407 flatbuffers::Offset <OperatorCode> operatorCode =
408 CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_TRANSPOSE_CONV);
409
410 flatbuffers::Offset <Model> flatbufferModel =
411 CreateModel(flatBufferBuilder,
412 TFLITE_SCHEMA_VERSION,
413 flatBufferBuilder.CreateVector(&operatorCode, 1),
414 flatBufferBuilder.CreateVector(&subgraph, 1),
415 modelDescription,
416 flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
417
418 flatBufferBuilder.Finish(flatbufferModel);
419
420 return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
421 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
422}
423
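// Builds the transpose-convolution model above and runs it through both the
// ArmNN delegate and the reference TfLite kernels, comparing the two outputs
// against each other and against the expected values.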
template <typename T>
void TransposeConvTest(std::vector<armnn::BackendId>& backends,
                       tflite::TensorType tensorType,
                       uint32_t strideX,
                       uint32_t strideY,
                       tflite::Padding padding,
                       const std::vector<int32_t>& transposeTensorShape,
                       const std::vector<int32_t>& filterTensorShape,
                       const std::vector<int32_t>& inputTensorShape,
                       const std::vector<int32_t>& outputTensorShape,
                       const std::vector<int32_t>& transposeData,
                       const std::vector<T>& filterData,
                       std::vector<T>& inputValues,
                       std::vector<T>& expectedOutputValues,
                       float filterScale = 1.0f,
                       int filterOffset = 0,
                       float outputQuantScale = 1.0f,
                       int outputQuantOffset = 0,
                       float quantScale = 1.0f,
                       int quantOffset = 0)
{
    using namespace tflite;

    std::vector<char> modelBuffer;
    modelBuffer = CreateTransposeConvTfLiteModel<T>(tensorType,
                                                    strideX,
                                                    strideY,
                                                    padding,
                                                    transposeTensorShape,
                                                    filterTensorShape,
                                                    inputTensorShape,
                                                    outputTensorShape,
                                                    transposeData,
                                                    filterData,
                                                    filterScale,
                                                    filterOffset,
                                                    outputQuantScale,
                                                    outputQuantOffset,
                                                    quantScale,
                                                    quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());
    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);
    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
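    // TRANSPOSE_CONV inputs are {output shape, filter, input}, so the data
    // input is at index 2 of the interpreter's inputs.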
    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[2];
    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        tfLiteDelegateInputData[i] = inputValues[i];
    }

    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[2];
    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        armnnDelegateInputData[i] = inputValues[i];
    }

    // Run inference on both interpreters
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
    for (size_t i = 0; i < expectedOutputValues.size(); i++)
    {
        CHECK(armnnDelegateOutputData[i] == expectedOutputValues[i]);
        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
    }
}

} // anonymous namespace