//
// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>

#include "ConvolutionTestHelper.hpp"
#include "TestUtils.hpp"

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

#include <iostream>

namespace
{

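// RAII helper that swaps a stream's underlying buffer and restores the original on
// destruction, so a test can capture anything written to that stream.
// A minimal usage sketch (hypothetical names):
//     std::stringstream capture;
//     StreamRedirector redirect(std::cout, capture.rdbuf());
//     // ... run code whose output should be inspected; capture.str() holds it ...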
struct StreamRedirector
{
public:
    StreamRedirector(std::ostream& stream, std::streambuf* newStreamBuffer)
        : m_Stream(stream), m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer)) {}

    ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }

private:
    std::ostream& m_Stream;
    std::streambuf* m_BackupBuffer;
};

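// Builds a FlatBuffer model with two chained operators computing
// output = (input_0 + input_1) / input_2.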
std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
                                          const std::vector<int32_t>& tensorShape,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
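    // The TfLite schema expects buffer 0 to be an empty sentinel; the remaining five
    // buffers back the five tensors created below.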
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

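    // Tensors 0-2 are the subgraph inputs, tensor 3 holds the intermediate ADD
    // result consumed by DIV, and tensor 4 is the subgraph output.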
    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input_0"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("input_1"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              3,
                              flatBufferBuilder.CreateString("input_2"),
                              quantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              4,
                              flatBufferBuilder.CreateString("add"),
                              quantizationParameters);
    tensors[4] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              5,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create the operators' builtin options
    tflite::BuiltinOptions addBuiltinOptionsType = tflite::BuiltinOptions_AddOptions;
    flatbuffers::Offset<void> addBuiltinOptions =
        CreateAddOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();

    tflite::BuiltinOptions divBuiltinOptionsType = tflite::BuiltinOptions_DivOptions;
    flatbuffers::Offset<void> divBuiltinOptions =
        CreateDivOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();

    std::array<flatbuffers::Offset<Operator>, 2> operators;
    const std::vector<int32_t> addInputs{0, 1};
    const std::vector<int32_t> addOutputs{3};
    operators[0] = CreateOperator(flatBufferBuilder,
                                  0,
                                  flatBufferBuilder.CreateVector<int32_t>(addInputs.data(), addInputs.size()),
                                  flatBufferBuilder.CreateVector<int32_t>(addOutputs.data(), addOutputs.size()),
                                  addBuiltinOptionsType,
                                  addBuiltinOptions);
    const std::vector<int32_t> divInputs{3, 2};
    const std::vector<int32_t> divOutputs{4};
    operators[1] = CreateOperator(flatBufferBuilder,
                                  1,
                                  flatBufferBuilder.CreateVector<int32_t>(divInputs.data(), divInputs.size()),
                                  flatBufferBuilder.CreateVector<int32_t>(divOutputs.data(), divOutputs.size()),
                                  divBuiltinOptionsType,
                                  divBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{4};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(operators.data(), operators.size()));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Add and Div Operator Model");

    std::array<flatbuffers::Offset<OperatorCode>, 2> codes;
    codes[0] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_ADD);
    codes[1] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_DIV);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(codes.data(), codes.size()),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

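// Builds a minimal single-operator CEIL model with one input and one output tensor,
// consumed by DelegateOptionNoFallbackTest below.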
std::vector<char> CreateCeilTfLiteModel(tflite::TensorType tensorType,
                                        const std::vector<int32_t>& tensorShape,
                                        float quantScale = 1.0f,
                                        int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({quantScale}),
                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));

    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> ceilOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       BuiltinOptions_NONE);

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: CEIL Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_CEIL);

    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&ceilOperator, 1));

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

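// Runs the Add/Div model through a plain TfLite interpreter and through one using the
// ArmNN delegate created from the given options, then checks that both produce the
// expected output values.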
template <typename T>
void DelegateOptionTest(tflite::TensorType tensorType,
                        const std::vector<armnn::BackendId>& backends,
                        std::vector<int32_t>& tensorShape,
                        std::vector<T>& input0Values,
                        std::vector<T>& input1Values,
                        std::vector<T>& input2Values,
                        std::vector<T>& expectedOutputValues,
                        const armnnDelegate::DelegateOptions& delegateOptions,
                        float quantScale = 1.0f,
                        int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
                                                            tensorShape,
                                                            quantScale,
                                                            quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());
    // Create the TfLite interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN delegate
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);
    // Modify armnnDelegateInterpreter to use the ArmNN delegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
    armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
    armnnDelegate::FillInput(tfLiteInterpreter, 2, input2Values);

    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
    armnnDelegate::FillInput(armnnDelegateInterpreter, 2, input2Values);

    // Run inference on both interpreters
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);

    armnnDelegateInterpreter.reset(nullptr);
}

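// Variant of the test above for the no-fallback delegate option: it runs the CEIL
// model and tolerates ModifyGraphWithDelegate throwing, since with TfLite runtime
// fallback disabled an unsupported operator is expected to surface as an
// armnn::Exception rather than a TfLite error code.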
template <typename T>
void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
                                  const std::vector<armnn::BackendId>& backends,
                                  std::vector<int32_t>& tensorShape,
                                  std::vector<T>& inputValues,
                                  std::vector<T>& expectedOutputValues,
                                  const armnnDelegate::DelegateOptions& delegateOptions,
                                  float quantScale = 1.0f,
                                  int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateCeilTfLiteModel(tensorType,
                                                          tensorShape,
                                                          quantScale,
                                                          quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());
    // Create the TfLite interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN delegate
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);
    // Modify armnnDelegateInterpreter to use the ArmNN delegate
    try
    {
        armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());
    }
    catch (const armnn::Exception& e)
    {
        // Forward the exception message to std::cout
        std::cout << e.what() << std::endl;
    }

    // Set input data
    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);

    // Run inference on both interpreters
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);

    armnnDelegateInterpreter.reset(nullptr);
}

} // anonymous namespace