//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>

#include "ConvolutionTestHelper.hpp"
#include "TestUtils.hpp"

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

#include <iostream>

namespace
{

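// RAII helper that temporarily redirects a std::ostream to a caller-supplied stream buffer
// (for example, so a test can capture logging output) and restores the original buffer on destruction.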
struct StreamRedirector
{
public:
    StreamRedirector(std::ostream& stream, std::streambuf* newStreamBuffer)
        : m_Stream(stream), m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer)) {}

    ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }

private:
    std::ostream& m_Stream;
    std::streambuf* m_BackupBuffer;
};

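// Builds a FlatBuffer TfLite model containing an ADD operator followed by a DIV operator
// (three inputs, one output), used by DelegateOptionTest below.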
std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
                                          const std::vector<int32_t>& tensorShape,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input_0"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input_1"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input_2"),
                              quantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("add"),
                              quantizationParameters);
    tensors[4] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create the ADD and DIV operators
    tflite::BuiltinOptions addBuiltinOptionsType = tflite::BuiltinOptions_AddOptions;
    flatbuffers::Offset<void> addBuiltinOptions =
        CreateAddOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();

    tflite::BuiltinOptions divBuiltinOptionsType = tflite::BuiltinOptions_DivOptions;
    flatbuffers::Offset<void> divBuiltinOptions =
        CreateDivOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();

    std::array<flatbuffers::Offset<Operator>, 2> operators;
    const std::vector<int32_t> addInputs{0, 1};
    const std::vector<int32_t> addOutputs{3};
    operators[0] = CreateOperator(flatBufferBuilder,
                                  0,
                                  flatBufferBuilder.CreateVector<int32_t>(addInputs.data(), addInputs.size()),
                                  flatBufferBuilder.CreateVector<int32_t>(addOutputs.data(), addOutputs.size()),
                                  addBuiltinOptionsType,
                                  addBuiltinOptions);
    const std::vector<int32_t> divInputs{3, 2};
    const std::vector<int32_t> divOutputs{4};
    operators[1] = CreateOperator(flatBufferBuilder,
                                  1,
                                  flatBufferBuilder.CreateVector<int32_t>(divInputs.data(), divInputs.size()),
                                  flatBufferBuilder.CreateVector<int32_t>(divOutputs.data(), divOutputs.size()),
                                  divBuiltinOptionsType,
                                  divBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{4};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(operators.data(), operators.size()));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Add and Div Operator Model");

    std::array<flatbuffers::Offset<OperatorCode>, 2> codes;
    codes[0] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_ADD);
    codes[1] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_DIV);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(codes.data(), codes.size()),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

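// Builds a FlatBuffer TfLite model containing a single CEIL operator with one input and one output,
// used by DelegateOptionNoFallbackTest below.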
std::vector<char> CreateCeilTfLiteModel(tflite::TensorType tensorType,
                                        const std::vector<int32_t>& tensorShape,
                                        float quantScale = 1.0f,
                                        int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> ceilOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       BuiltinOptions_NONE);

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: CEIL Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_CEIL);

    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&ceilOperator, 1));

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

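// Runs the Add/Div model through a plain TfLite interpreter and through an interpreter using the
// ArmNN delegate created from the given delegate options, then compares the outputs of both runs.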
template <typename T>
void DelegateOptionTest(tflite::TensorType tensorType,
                        const std::vector<armnn::BackendId>& backends,
                        std::vector<int32_t>& tensorShape,
                        std::vector<T>& input0Values,
                        std::vector<T>& input1Values,
                        std::vector<T>& input2Values,
                        std::vector<T>& expectedOutputValues,
                        const armnnDelegate::DelegateOptions& delegateOptions,
                        float quantScale = 1.0f,
                        int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
                                                            tensorShape,
                                                            quantScale,
                                                            quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
    armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
    armnnDelegate::FillInput(tfLiteInterpreter, 2, input2Values);

    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
    armnnDelegate::FillInput(armnnDelegateInterpreter, 2, input2Values);

    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter,
                                        tensorShape, expectedOutputValues);

    armnnDelegateInterpreter.reset(nullptr);
}

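// Variant of DelegateOptionTest that runs the single-operator CEIL model and compares the outputs
// of the plain TfLite interpreter and the ArmNN-delegated interpreter.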
template <typename T>
void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
                                  const std::vector<armnn::BackendId>& backends,
                                  std::vector<int32_t>& tensorShape,
                                  std::vector<T>& inputValues,
                                  std::vector<T>& expectedOutputValues,
                                  const armnnDelegate::DelegateOptions& delegateOptions,
                                  float quantScale = 1.0f,
                                  int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateCeilTfLiteModel(tensorType,
                                                          tensorShape,
                                                          quantScale,
                                                          quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
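    // With fallback to the TfLite runtime disabled in the delegate options, ModifyGraphWithDelegate
    // may throw an armnn::Exception if the graph cannot be delegated; the message is logged instead
    // of failing the test so the comparison below can still run.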
    try
    {
        armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());
    }
    catch (const armnn::Exception& e)
    {
        // Forward the exception message to std::cout
        std::cout << e.what() << std::endl;
    }

    // Set input data
    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);

    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter,
                                        tensorShape, expectedOutputValues);

    armnnDelegateInterpreter.reset(nullptr);
}

} // anonymous namespace