//
// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <tensorflow/lite/version.h>

namespace
{

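// Builds a FlatBuffer model containing a single SPLIT operator.
// Tensor 0 is the constant axis, tensor 1 is the data input and tensors 2
// onwards are the outputs. Note that the tensor array and the operator/subgraph
// output lists below are hard-coded for exactly two splits.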
std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
                                         std::vector<int32_t>& axisTensorShape,
                                         std::vector<int32_t>& inputTensorShape,
                                         const std::vector<std::vector<int32_t>>& outputTensorShapes,
                                         std::vector<int32_t>& axisData,
                                         const int32_t numSplits,
                                         float quantScale = 1.0f,
                                         int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // Buffers: 0 is the sentinel empty buffer, 1 is the (empty) buffer referenced
    // by the input tensor, and 2 holds the constant axis data.
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder,
                                   flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                                  sizeof(int32_t) * axisData.size())));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                      axisTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              2,
                              flatBufferBuilder.CreateString("axis"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

    // Create the output tensors, each backed by a fresh empty buffer.
    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
    {
        buffers.push_back(CreateBuffer(flatBufferBuilder));
        tensors[i + 2] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
                                                                              outputTensorShapes[i].size()),
                                      tensorType,
                                      (i + 3),
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
    }

    // Create the operator. SPLIT uses SplitOptions.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitOptions(flatBufferBuilder, numSplits).Union();

    const std::vector<int> operatorInputs{ {0, 1} };
    const std::vector<int> operatorOutputs{ {2, 3} };
    flatbuffers::Offset<Operator> controlOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{ {0, 1} };
    const std::vector<int> subgraphOutputs{ {2, 3} };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

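// Runs the SPLIT model once on the plain TfLite runtime and once with the
// Arm NN delegate applied, then checks both interpreters against the expected
// values and shapes.
//
// A minimal usage sketch, with illustrative values only (not taken from an
// existing test case; the helper above is laid out for numSplits == 2):
//
//     std::vector<int32_t> axisShape  { 1 };
//     std::vector<int32_t> inputShape { 2, 2 };
//     std::vector<std::vector<int32_t>> outputShapes {{ 1, 2 }, { 1, 2 }};
//     std::vector<int32_t> axis  { 0 };
//     std::vector<float>   input { 1.0f, 2.0f, 3.0f, 4.0f };
//     std::vector<std::vector<float>> expected {{ 1.0f, 2.0f }, { 3.0f, 4.0f }};
//     SplitTest<float>(tflite::TensorType_FLOAT32, axisShape, inputShape,
//                      outputShapes, axis, input, expected, 2);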
template <typename T>
void SplitTest(tflite::TensorType tensorType,
               std::vector<int32_t>& axisTensorShape,
               std::vector<int32_t>& inputTensorShape,
               std::vector<std::vector<int32_t>>& outputTensorShapes,
               std::vector<int32_t>& axisData,
               std::vector<T>& inputValues,
               std::vector<std::vector<T>>& expectedOutputValues,
               const int32_t numSplits,
               const std::vector<armnn::BackendId>& backends = {},
               float quantScale = 1.0f,
               int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateSplitTfLiteModel(tensorType,
                                                           axisTensorShape,
                                                           inputTensorShape,
                                                           outputTensorShapes,
                                                           axisData,
                                                           numSplits,
                                                           quantScale,
                                                           quantOffset);

    // Set up an interpreter with just the TfLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    // Input index 1 is the data tensor (input index 0 is the constant axis).
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 1) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 1) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);

    // Compare the output data from both interpreters against the expected values and shapes.
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(i);

        std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(i);

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShapes[i]);
    }

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();

} // End of SPLIT Test

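// Builds a FlatBuffer model containing a single SPLIT_V operator.
// Tensor 0 is the data input, tensor 1 holds the constant split sizes, tensor 2
// holds the constant axis and tensors 3 onwards are the outputs. As with SPLIT
// above, the fixed-size tensor array and output lists assume exactly two splits.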
std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
                                          std::vector<int32_t>& inputTensorShape,
                                          std::vector<int32_t>& splitsTensorShape,
                                          std::vector<int32_t>& axisTensorShape,
                                          const std::vector<std::vector<int32_t>>& outputTensorShapes,
                                          std::vector<int32_t>& splitsData,
                                          std::vector<int32_t>& axisData,
                                          const int32_t numSplits,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // Buffers: 0 is the sentinel empty buffer (also referenced by the input and
    // output tensors), 1 holds the constant splits data and 2 the constant axis.
    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(splitsData.data()),
                                                             sizeof(int32_t) * splitsData.size()));
    buffers[2] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                             sizeof(int32_t) * axisData.size()));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(splitsTensorShape.data(),
                                                                      splitsTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              1,
                              flatBufferBuilder.CreateString("splits"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                      axisTensorShape.size()),
                              ::tflite::TensorType_INT32,
                              2,
                              flatBufferBuilder.CreateString("axis"),
                              quantizationParameters);

    // Create the output tensors; they reference the sentinel empty buffer 0.
    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
    {
        tensors[i + 3] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
                                                                              outputTensorShapes[i].size()),
                                      tensorType,
                                      0,
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
    }

    // Create the operator. SPLIT_V uses SplitVOptions.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitVOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitVOptions(flatBufferBuilder, numSplits).Union();

    const std::vector<int> operatorInputs{ {0, 1, 2} };
    const std::vector<int> operatorOutputs{ {3, 4} };
    flatbuffers::Offset<Operator> controlOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{ {0, 1, 2} };
    const std::vector<int> subgraphOutputs{ {3, 4} };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT_V);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

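// Runs the SPLIT_V model once on the plain TfLite runtime and once with the
// Arm NN delegate applied, then checks both interpreters against the expected
// values and shapes.
//
// A minimal usage sketch, with illustrative values only (not taken from an
// existing test case):
//
//     std::vector<int32_t> inputShape  { 4 };
//     std::vector<int32_t> splitsShape { 2 };
//     std::vector<int32_t> axisShape   { 1 };
//     std::vector<std::vector<int32_t>> outputShapes {{ 1 }, { 3 }};
//     std::vector<float>   input  { 1.0f, 2.0f, 3.0f, 4.0f };
//     std::vector<int32_t> splits { 1, 3 };
//     std::vector<int32_t> axis   { 0 };
//     std::vector<std::vector<float>> expected {{ 1.0f }, { 2.0f, 3.0f, 4.0f }};
//     SplitVTest<float>(tflite::TensorType_FLOAT32, inputShape, splitsShape, axisShape,
//                       outputShapes, input, splits, axis, expected, 2);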
template <typename T>
void SplitVTest(tflite::TensorType tensorType,
                std::vector<int32_t>& inputTensorShape,
                std::vector<int32_t>& splitsTensorShape,
                std::vector<int32_t>& axisTensorShape,
                std::vector<std::vector<int32_t>>& outputTensorShapes,
                std::vector<T>& inputValues,
                std::vector<int32_t>& splitsData,
                std::vector<int32_t>& axisData,
                std::vector<std::vector<T>>& expectedOutputValues,
                const int32_t numSplits,
                const std::vector<armnn::BackendId>& backends = {},
                float quantScale = 1.0f,
                int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateSplitVTfLiteModel(tensorType,
                                                            inputTensorShape,
                                                            splitsTensorShape,
                                                            axisTensorShape,
                                                            outputTensorShapes,
                                                            splitsData,
                                                            axisData,
                                                            numSplits,
                                                            quantScale,
                                                            quantOffset);

    // Set up an interpreter with just the TfLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    // Input index 0 is the data tensor (the splits and axis inputs are constant).
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);

    // Compare the output data from both interpreters against the expected values and shapes.
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(i);

        std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(i);

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShapes[i]);
    }

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
} // End of SPLIT_V Test

} // anonymous namespace