//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

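// Builds a minimal single-operator TfLite model containing one FILL node and
// returns the serialised flatbuffer as raw bytes. The dims and fill value are
// stored in constant buffers, so the graph needs no runtime inputs to be set.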
template <typename T>
std::vector<char> CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode,
                                        tflite::TensorType tensorType,
                                        const std::vector<int32_t>& inputShape,
                                        const std::vector<int32_t>& tensorShape,
                                        const std::vector<T>& fillValue)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

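    // Buffer 0 must stay empty (the TfLite schema reserves it as the sentinel
    // for "no data"); buffers 1 and 2 carry the raw bytes of the dims tensor
    // and the fill value respectively.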
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(
        CreateBuffer(flatBufferBuilder,
                     flatBufferBuilder.CreateVector({})));
    buffers.push_back(
        CreateBuffer(flatBufferBuilder,
                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(tensorShape.data()),
                                                    sizeof(int32_t) * tensorShape.size())));
    buffers.push_back(
        CreateBuffer(flatBufferBuilder,
                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(fillValue.data()),
                                                    sizeof(T) * fillValue.size())));

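    // Three tensors: [0] the INT32 "dims" input (buffer 1), [1] the scalar
    // "value" input (buffer 2), and [2] the "output" tensor, which points at
    // the empty buffer 0 because it is produced at runtime.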
    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
                                                                      inputShape.size()),
                              tflite::TensorType_INT32,
                              1,
                              flatBufferBuilder.CreateString("dims"));

    std::vector<int32_t> fillShape = {};
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(fillShape.data(),
                                                                      fillShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("value"));

    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"));

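    // FILL takes no parameters, so FillOptions is an empty table; it is still
    // attached so the operator carries a well-formed builtin-options union.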
    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_FillOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateFillOptions(flatBufferBuilder).Union();

    // Create the single FILL operator: tensors 0 (dims) and 1 (value) in,
    // tensor 2 (output) out.
    const std::vector<int> operatorInputs{ 0, 1 };
    const std::vector<int> operatorOutputs{ 2 };
    flatbuffers::Offset<Operator> fillOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{ 0, 1 };
    const std::vector<int> subgraphOutputs{ 2 };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&fillOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Fill Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                        fillOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

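// Runs the generated FILL model through two interpreters, one with the Arm NN
// delegate attached and one on the reference TfLite kernels, then checks that
// both produce the expected output values.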
template <typename T>
void FillTest(tflite::BuiltinOperator fillOperatorCode,
              tflite::TensorType tensorType,
              const std::vector<armnn::BackendId>& backends,
              std::vector<int32_t>& inputShape,
              std::vector<int32_t>& tensorShape,
              std::vector<T>& expectedOutputValues,
              T fillValue)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateFillTfLiteModel<T>(fillOperatorCode,
                                                             tensorType,
                                                             inputShape,
                                                             tensorShape,
                                                             {fillValue});

    const Model* tfLiteModel = GetModel(modelBuffer.data());
    CHECK(tfLiteModel != nullptr);

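    // Build two interpreters from the same flatbuffer: one to be modified with
    // the Arm NN delegate, and one left on the built-in kernels as a reference.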
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the Arm NN delegate.
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);
    // Hand the graph to the delegate so supported operators run through Arm NN.
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Run inference on both interpreters.
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
                                        armnnDelegateInterpreter,
                                        tensorShape,
                                        expectedOutputValues);
}
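
// A minimal usage sketch with illustrative values (not one of the shipped test
// cases): fill a 2x2x2x2 FLOAT32 tensor with 3.0f on the reference backend.
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> inputShape  { 4 };           // dims tensor holds one entry per output dimension
//     std::vector<int32_t> tensorShape { 2, 2, 2, 2 };  // shape to fill
//     std::vector<float> expectedOutputValues(16, 3.0f);
//     FillTest<float>(tflite::BuiltinOperator_FILL,
//                     ::tflite::TensorType_FLOAT32,
//                     backends,
//                     inputShape,
//                     tensorShape,
//                     expectedOutputValues,
//                     3.0f);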

} // anonymous namespace