//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
                                         std::vector<int32_t>& axisTensorShape,
                                         std::vector<int32_t>& inputTensorShape,
                                         const std::vector<std::vector<int32_t>>& outputTensorShapes,
                                         std::vector<int32_t>& axisData,
                                         const int32_t numSplits,
                                         float quantScale = 1.0f,
                                         int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder,
                                   flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                                  sizeof(int32_t) * axisData.size())));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                      axisTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              2,
                              flatBufferBuilder.CreateString("axis"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

    // Create the output tensors; the fixed four-entry tensors array assumes numSplits == 2.
    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
    {
        buffers.push_back(CreateBuffer(flatBufferBuilder));
        tensors[i + 2] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
                                                                              outputTensorShapes[i].size()),
                                      tensorType,
                                      (i + 3),
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
    }

    // Create the operator; SPLIT uses SplitOptions.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitOptions(flatBufferBuilder, numSplits).Union();

    const std::vector<int> operatorInputs{ {0, 1} };
    const std::vector<int> operatorOutputs{ {2, 3} };
    flatbuffers::Offset<Operator> controlOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{ {0, 1} };
    const std::vector<int> subgraphOutputs{ {2, 3} };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
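
// A sanity-check sketch, not used by the tests in this suite: the serialised
// buffer returned above can be validated with the FlatBuffers verifier before
// it is handed to an interpreter. IsValidTfLiteModel is a hypothetical helper;
// it assumes tflite::VerifyModelBuffer (from the generated TfLite schema
// header) is pulled in transitively by the includes above, and that
// armnnDelegate::FILE_IDENTIFIER matches the TfLite schema identifier.
inline bool IsValidTfLiteModel(const std::vector<char>& modelBuffer)
{
    flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t*>(modelBuffer.data()),
                                   modelBuffer.size());
    return tflite::VerifyModelBuffer(verifier);
}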

template <typename T>
void SplitTest(tflite::TensorType tensorType,
               std::vector<armnn::BackendId>& backends,
               std::vector<int32_t>& axisTensorShape,
               std::vector<int32_t>& inputTensorShape,
               std::vector<std::vector<int32_t>>& outputTensorShapes,
               std::vector<int32_t>& axisData,
               std::vector<T>& inputValues,
               std::vector<std::vector<T>>& expectedOutputValues,
               const int32_t numSplits,
               float quantScale = 1.0f,
               int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateSplitTfLiteModel(tensorType,
                                                           axisTensorShape,
                                                           inputTensorShape,
                                                           outputTensorShapes,
                                                           axisData,
                                                           numSplits,
                                                           quantScale,
                                                           quantOffset);

    // Set up an interpreter using just the TfLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    // The input data is tensor 1 in the SPLIT model; tensor 0 holds the constant axis.
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 1) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);

    // Set up an interpreter with the Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 1) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);

    // Compare the output data and shapes from both interpreters against the expected values.
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(i);

        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(i);

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShapes[i]);
    }

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
} // End of SPLIT Test
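
// A minimal usage sketch for SplitTest, not part of the original test suite:
// it splits a [2, 2, 2, 2] float tensor into two halves along axis 2. The
// shapes, values, test name, and the choice of the CpuRef backend are
// illustrative assumptions, mirroring how the real SPLIT cases drive the
// helper above.
TEST_CASE ("Split_Float32_Sketch")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> axisShape { 1 };
    std::vector<int32_t> inputShape { 2, 2, 2, 2 };
    std::vector<std::vector<int32_t>> outputShapes { { 2, 2, 1, 2 },
                                                     { 2, 2, 1, 2 } };
    std::vector<int32_t> axisData { 2 }; // Split along the third dimension.
    std::vector<float> inputValues { 0, 1, 2, 3, 4, 5, 6, 7,
                                     8, 9, 10, 11, 12, 13, 14, 15 };
    std::vector<std::vector<float>> expectedOutputValues { { 0, 1, 4, 5, 8, 9, 12, 13 },
                                                           { 2, 3, 6, 7, 10, 11, 14, 15 } };
    SplitTest<float>(tflite::TensorType_FLOAT32,
                     backends,
                     axisShape,
                     inputShape,
                     outputShapes,
                     axisData,
                     inputValues,
                     expectedOutputValues,
                     2); // numSplits
}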

std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
                                          std::vector<int32_t>& inputTensorShape,
                                          std::vector<int32_t>& splitsTensorShape,
                                          std::vector<int32_t>& axisTensorShape,
                                          const std::vector<std::vector<int32_t>>& outputTensorShapes,
                                          std::vector<int32_t>& splitsData,
                                          std::vector<int32_t>& axisData,
                                          const int32_t numSplits,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(splitsData.data()),
                                                             sizeof(int32_t) * splitsData.size()));
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                             sizeof(int32_t) * axisData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(splitsTensorShape.data(),
                                                                      splitsTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              1,
                              flatBufferBuilder.CreateString("splits"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                      axisTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              2,
                              flatBufferBuilder.CreateString("axis"),
                              quantizationParameters);

    // Create the output tensors; as with the SPLIT builder above, the five-entry
    // tensors array assumes numSplits == 2.
    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
    {
        tensors[i + 3] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
                                                                              outputTensorShapes[i].size()),
                                      tensorType,
                                      0,
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
    }

    // Create the operator; SPLIT_V uses SplitVOptions.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitVOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitVOptions(flatBufferBuilder, numSplits).Union();

    const std::vector<int> operatorInputs{ {0, 1, 2} };
    const std::vector<int> operatorOutputs{ {3, 4} };
    flatbuffers::Offset<Operator> controlOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{ {0, 1, 2} };
    const std::vector<int> subgraphOutputs{ {3, 4} };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT_V);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template <typename T>
void SplitVTest(tflite::TensorType tensorType,
                std::vector<armnn::BackendId>& backends,
                std::vector<int32_t>& inputTensorShape,
                std::vector<int32_t>& splitsTensorShape,
                std::vector<int32_t>& axisTensorShape,
                std::vector<std::vector<int32_t>>& outputTensorShapes,
                std::vector<T>& inputValues,
                std::vector<int32_t>& splitsData,
                std::vector<int32_t>& axisData,
                std::vector<std::vector<T>>& expectedOutputValues,
                const int32_t numSplits,
                float quantScale = 1.0f,
                int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateSplitVTfLiteModel(tensorType,
                                                            inputTensorShape,
                                                            splitsTensorShape,
                                                            axisTensorShape,
                                                            outputTensorShapes,
                                                            splitsData,
                                                            axisData,
                                                            numSplits,
                                                            quantScale,
                                                            quantOffset);

    // Set up an interpreter using just the TfLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    // The input data is tensor 0 in the SPLIT_V model; splits and axis are constant tensors 1 and 2.
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);

    // Set up an interpreter with the Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);

    // Compare the output data and shapes from both interpreters against the expected values.
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(i);

        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(i);

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShapes[i]);
    }

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
} // End of SPLIT_V Test
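
// A minimal usage sketch for SplitVTest, not part of the original test suite:
// it splits a [1, 2, 2, 4] float tensor into unequal slices of size 1 and 3
// along the last axis. The shapes, values, test name, and the CpuRef backend
// are illustrative assumptions.
TEST_CASE ("SplitV_Float32_Sketch")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> inputShape { 1, 2, 2, 4 };
    std::vector<int32_t> splitsShape { 2 };
    std::vector<int32_t> axisShape { 1 };
    std::vector<std::vector<int32_t>> outputShapes { { 1, 2, 2, 1 },
                                                     { 1, 2, 2, 3 } };
    std::vector<float> inputValues { 0, 1, 2, 3, 4, 5, 6, 7,
                                     8, 9, 10, 11, 12, 13, 14, 15 };
    std::vector<int32_t> splitsData { 1, 3 }; // Unequal split sizes.
    std::vector<int32_t> axisData { 3 };      // Split along the last dimension.
    std::vector<std::vector<float>> expectedOutputValues { { 0, 4, 8, 12 },
                                                           { 1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15 } };
    SplitVTest<float>(tflite::TensorType_FLOAT32,
                      backends,
                      inputShape,
                      splitsShape,
                      axisShape,
                      outputShapes,
                      inputValues,
                      splitsData,
                      axisData,
                      expectedOutputValues,
                      2); // numSplits
}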

} // anonymous namespace