//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

#include <string>

namespace
{

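// Builds a single-operator SPLIT model as a TfLite flatbuffer: a constant
// INT32 axis tensor and an input tensor feed one SPLIT operator. The helper
// wires exactly two outputs (tensors 2 and 3), so numSplits is expected to be 2.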
std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
                                         std::vector<int32_t>& axisTensorShape,
                                         std::vector<int32_t>& inputTensorShape,
                                         const std::vector<std::vector<int32_t>>& outputTensorShapes,
                                         std::vector<int32_t>& axisData,
                                         const int32_t numSplits,
                                         float quantScale = 1.0f,
                                         int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

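    // Buffer 0 is the standard empty buffer; buffer 1 holds the constant axis data.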
    std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                             sizeof(int32_t) * axisData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

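    // Tensor 0 is the constant axis (backed by buffer 1), tensor 1 the input,
    // and tensors 2..3 the SPLIT outputs.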
    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                      axisTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              1,
                              flatBufferBuilder.CreateString("axis"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

    // Create the output tensors
    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
    {
        tensors[i + 2] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
                                                                              outputTensorShapes[i].size()),
                                      tensorType,
                                      0,
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
    }

    // Create the operator. SPLIT uses SplitOptions.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitOptions(flatBufferBuilder, numSplits).Union();

    const std::vector<int> operatorInputs{ {0, 1} };
    const std::vector<int> operatorOutputs{ {2, 3} };
    flatbuffers::Offset<Operator> controlOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{ {0, 1} };
    const std::vector<int> subgraphOutputs{ {2, 3} };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

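// Runs the generated SPLIT model through two interpreters, one with the
// ArmNN delegate attached and one using the reference TfLite kernels, and
// checks that both produce the expected output values.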
template <typename T>
void SplitTest(tflite::TensorType tensorType,
               std::vector<armnn::BackendId>& backends,
               std::vector<int32_t>& axisTensorShape,
               std::vector<int32_t>& inputTensorShape,
               std::vector<std::vector<int32_t>>& outputTensorShapes,
               std::vector<int32_t>& axisData,
               std::vector<T>& inputValues,
               std::vector<std::vector<T>>& expectedOutputValues,
               const int32_t numSplits,
               float quantScale = 1.0f,
               int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateSplitTfLiteModel(tensorType,
                                                           axisTensorShape,
                                                           inputTensorShape,
                                                           outputTensorShapes,
                                                           axisData,
                                                           numSplits,
                                                           quantScale,
                                                           quantOffset);
    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create the TfLite interpreters: one to run with the ArmNN delegate
    // attached, one to run with the reference TfLite kernels.
    std::unique_ptr<Interpreter> armnnDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegate) == kTfLiteOk);
    CHECK(armnnDelegate != nullptr);
    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteDelegate) == kTfLiteOk);
    CHECK(tfLiteDelegate != nullptr);
    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify the first interpreter's graph to run through the ArmNN delegate
    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data (input index 1 is the data tensor; index 0 is the constant axis)
    armnnDelegate::FillInput<T>(tfLiteDelegate, 1, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegate, 1, inputValues);

    // Run inference on both interpreters
    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
    CHECK(armnnDelegate->Invoke() == kTfLiteOk);

    // Compare the output of each interpreter against the expected values
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
                                            armnnDelegate,
                                            outputTensorShapes[i],
                                            expectedOutputValues[i],
                                            i);
    }

    tfLiteDelegate.reset(nullptr);
    armnnDelegate.reset(nullptr);
} // End of SPLIT Test
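
// A minimal usage sketch (hypothetical values, for illustration only):
// splitting a 2x2x2 float tensor into two halves along axis 0.
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> axisShape  { 1 };
//     std::vector<int32_t> inputShape { 2, 2, 2 };
//     std::vector<std::vector<int32_t>> outputShapes { { 1, 2, 2 }, { 1, 2, 2 } };
//     std::vector<int32_t> axisData { 0 };
//     std::vector<float> inputValues { 1, 2, 3, 4, 5, 6, 7, 8 };
//     std::vector<std::vector<float>> expectedOutputValues { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
//     SplitTest<float>(tflite::TensorType_FLOAT32, backends, axisShape, inputShape,
//                      outputShapes, axisData, inputValues, expectedOutputValues, 2);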

std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
                                          std::vector<int32_t>& inputTensorShape,
                                          std::vector<int32_t>& splitsTensorShape,
                                          std::vector<int32_t>& axisTensorShape,
                                          const std::vector<std::vector<int32_t>>& outputTensorShapes,
                                          std::vector<int32_t>& splitsData,
                                          std::vector<int32_t>& axisData,
                                          const int32_t numSplits,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

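    // Buffer 0 is the standard empty buffer; buffers 1 and 2 hold the constant
    // splits and axis data respectively.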
    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(splitsData.data()),
                                                             sizeof(int32_t) * splitsData.size()));
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                             sizeof(int32_t) * axisData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

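    // Tensor 0 is the input, tensors 1 and 2 the constant splits and axis
    // (backed by buffers 1 and 2), and tensors 3..4 the SPLIT_V outputs.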
    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(splitsTensorShape.data(),
                                                                      splitsTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              1,
                              flatBufferBuilder.CreateString("splits"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                      axisTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              2,
                              flatBufferBuilder.CreateString("axis"),
                              quantizationParameters);

    // Create the output tensors
    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
    {
        tensors[i + 3] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
                                                                              outputTensorShapes[i].size()),
                                      tensorType,
                                      0,
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
    }

    // Create the operator. SPLIT_V uses SplitVOptions.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitVOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitVOptions(flatBufferBuilder, numSplits).Union();

    const std::vector<int> operatorInputs{ {0, 1, 2} };
    const std::vector<int> operatorOutputs{ {3, 4} };
    flatbuffers::Offset<Operator> controlOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{ {0, 1, 2} };
    const std::vector<int> subgraphOutputs{ {3, 4} };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT_V);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

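// Runs the generated SPLIT_V model through two interpreters, one with the
// ArmNN delegate attached and one using the reference TfLite kernels, and
// checks that both produce the expected output values.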
template <typename T>
void SplitVTest(tflite::TensorType tensorType,
                std::vector<armnn::BackendId>& backends,
                std::vector<int32_t>& inputTensorShape,
                std::vector<int32_t>& splitsTensorShape,
                std::vector<int32_t>& axisTensorShape,
                std::vector<std::vector<int32_t>>& outputTensorShapes,
                std::vector<T>& inputValues,
                std::vector<int32_t>& splitsData,
                std::vector<int32_t>& axisData,
                std::vector<std::vector<T>>& expectedOutputValues,
                const int32_t numSplits,
                float quantScale = 1.0f,
                int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateSplitVTfLiteModel(tensorType,
                                                            inputTensorShape,
                                                            splitsTensorShape,
                                                            axisTensorShape,
                                                            outputTensorShapes,
                                                            splitsData,
                                                            axisData,
                                                            numSplits,
                                                            quantScale,
                                                            quantOffset);
    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create the TfLite interpreters: one to run with the ArmNN delegate
    // attached, one to run with the reference TfLite kernels.
    std::unique_ptr<Interpreter> armnnDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegate) == kTfLiteOk);
    CHECK(armnnDelegate != nullptr);
    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteDelegate) == kTfLiteOk);
    CHECK(tfLiteDelegate != nullptr);
    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify the first interpreter's graph to run through the ArmNN delegate
    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data (input index 0 is the data tensor; splits and axis are constant)
    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);

    // Run inference on both interpreters
    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
    CHECK(armnnDelegate->Invoke() == kTfLiteOk);

    // Compare the output of each interpreter against the expected values
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
                                            armnnDelegate,
                                            outputTensorShapes[i],
                                            expectedOutputValues[i],
                                            i);
    }

    tfLiteDelegate.reset(nullptr);
    armnnDelegate.reset(nullptr);
} // End of SPLIT_V Test
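
// A minimal usage sketch (hypothetical values, for illustration only):
// splitting a 2x3 float tensor into widths {1, 2} along axis 1.
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> inputShape  { 2, 3 };
//     std::vector<int32_t> splitsShape { 2 };
//     std::vector<int32_t> axisShape   { 1 };
//     std::vector<std::vector<int32_t>> outputShapes { { 2, 1 }, { 2, 2 } };
//     std::vector<float> inputValues { 1, 2, 3, 4, 5, 6 };
//     std::vector<int32_t> splitsData { 1, 2 };
//     std::vector<int32_t> axisData { 1 };
//     std::vector<std::vector<float>> expectedOutputValues { { 1, 4 }, { 2, 3, 5, 6 } };
//     SplitVTest<float>(tflite::TensorType_FLOAT32, backends, inputShape, splitsShape,
//                       axisShape, outputShapes, inputValues, splitsData, axisData,
//                       expectedOutputValues, 2);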

} // anonymous namespace