//
// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <tensorflow/lite/version.h>

namespace
{

// RAII helper that swaps the buffer of a std::ostream for a caller-supplied one
// and restores the original buffer on destruction, e.g. to capture log output
// written to std::cout during a test.
struct StreamRedirector
{
public:
    StreamRedirector(std::ostream &stream, std::streambuf *newStreamBuffer)
        : m_Stream(stream), m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer)) {}

    ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }

private:
    std::ostream &m_Stream;
    std::streambuf *m_BackupBuffer;
};
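
// A minimal usage sketch (illustrative only, not used by the helpers below):
// everything written to std::cout while the redirector is alive lands in the
// stringstream, and the destructor restores the original buffer.
//
//     std::stringstream ss;
//     {
//         StreamRedirector redirect(std::cout, ss.rdbuf());
//         std::cout << "captured";
//     }
//     CHECK(ss.str() == "captured");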

// Builds a TfLite FlatBuffer model containing an ADD followed by a DIV:
// output = (input_0 + input_1) / input_2.
std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
                                          const std::vector<int32_t>& tensorShape,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // Buffer 0 is the TfLite empty sentinel; each of the five tensors
    // references its own (empty) buffer 1-5.
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input_0"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("input_1"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              3,
                              flatBufferBuilder.CreateString("input_2"),
                              quantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              4,
                              flatBufferBuilder.CreateString("add"),
                              quantizationParameters);
    tensors[4] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              5,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create the ADD and DIV operators and their builtin options.
    tflite::BuiltinOptions addBuiltinOptionsType = tflite::BuiltinOptions_AddOptions;
    flatbuffers::Offset<void> addBuiltinOptions =
        CreateAddOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();

    tflite::BuiltinOptions divBuiltinOptionsType = tflite::BuiltinOptions_DivOptions;
    flatbuffers::Offset<void> divBuiltinOptions =
        CreateDivOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();

    std::array<flatbuffers::Offset<Operator>, 2> operators;
    const std::vector<int32_t> addInputs{0, 1};
    const std::vector<int32_t> addOutputs{3};
    operators[0] = CreateOperator(flatBufferBuilder,
                                  0,
                                  flatBufferBuilder.CreateVector<int32_t>(addInputs.data(), addInputs.size()),
                                  flatBufferBuilder.CreateVector<int32_t>(addOutputs.data(), addOutputs.size()),
                                  addBuiltinOptionsType,
                                  addBuiltinOptions);
    const std::vector<int32_t> divInputs{3, 2};
    const std::vector<int32_t> divOutputs{4};
    operators[1] = CreateOperator(flatBufferBuilder,
                                  1,
                                  flatBufferBuilder.CreateVector<int32_t>(divInputs.data(), divInputs.size()),
                                  flatBufferBuilder.CreateVector<int32_t>(divOutputs.data(), divOutputs.size()),
                                  divBuiltinOptionsType,
                                  divBuiltinOptions);

    const std::vector<int> subgraphInputs{0, 1, 2};
    const std::vector<int> subgraphOutputs{4};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(operators.data(), operators.size()));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Add and Div Operator Model");

    std::array<flatbuffers::Offset<OperatorCode>, 2> codes;
    codes[0] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_ADD);
    codes[1] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_DIV);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(codes.data(), codes.size()),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
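
// A hedged sanity-check sketch (illustrative only): the returned bytes can be
// verified against the TfLite schema before being handed to an interpreter.
//
//     std::vector<char> buffer = CreateAddDivTfLiteModel(tflite::TensorType_FLOAT32, { 2, 2 });
//     flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t*>(buffer.data()), buffer.size());
//     CHECK(tflite::VerifyModelBuffer(verifier));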

// Builds a TfLite FlatBuffer model containing a single COS operator:
// output = cos(input). COS is used here because it is expected to be
// unsupported by the Arm NN delegate, letting the no-fallback test below
// exercise the unsupported-operator path.
std::vector<char> CreateCosTfLiteModel(tflite::TensorType tensorType,
                                       const std::vector<int32_t>& tensorShape,
                                       float quantScale = 1.0f,
                                       int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({quantScale}),
                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));

    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> cosOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       BuiltinOptions_NONE);

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: COS Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_COS);

    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&cosOperator, 1));

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

// Runs the Add/Div model twice, once on the stock TfLite runtime and once with
// the Arm NN delegate applied using the given options, and checks that both
// runs produce the expected output values and shape.
template <typename T>
void DelegateOptionTest(tflite::TensorType tensorType,
                        std::vector<int32_t>& tensorShape,
                        std::vector<T>& input0Values,
                        std::vector<T>& input1Values,
                        std::vector<T>& input2Values,
                        std::vector<T>& expectedOutputValues,
                        const armnnDelegate::DelegateOptions& delegateOptions,
                        float quantScale = 1.0f,
                        int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
                                                            tensorShape,
                                                            quantScale,
                                                            quantOffset);

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Setup interpreter with Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
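
// A hedged usage sketch (illustrative only; the chosen backend is an
// assumption and depends on the Arm NN build):
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     armnnDelegate::DelegateOptions delegateOptions(backends);
//     DelegateOptionTest<float>(tflite::TensorType_FLOAT32, tensorShape,
//                               input0, input1, input2, expected, delegateOptions);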

// Runs the single-COS model on the stock TfLite runtime, then attempts the same
// run with the Arm NN delegate applied. With fallback disabled the delegate is
// expected to raise an armnn::Exception for the unsupported operator, which is
// forwarded to std::cout rather than failing the test here.
template <typename T>
void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
                                  std::vector<int32_t>& tensorShape,
                                  std::vector<T>& inputValues,
                                  std::vector<T>& expectedOutputValues,
                                  const armnnDelegate::DelegateOptions& delegateOptions,
                                  float quantScale = 1.0f,
                                  int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateCosTfLiteModel(tensorType,
                                                         tensorShape,
                                                         quantScale,
                                                         quantOffset);

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
    tfLiteInterpreter.Cleanup();

    try
    {
        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
        std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
        armnnInterpreter.Cleanup();

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
    }
    catch (const armnn::Exception& e)
    {
        // Forward the exception message to std::cout.
        std::cout << e.what() << std::endl;
    }
}
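
// A hedged usage sketch (illustrative only; DisableTfLiteRuntimeFallback is
// assumed to be available on DelegateOptions in this build):
//
//     armnnDelegate::DelegateOptions options(armnn::Compute::CpuAcc);
//     options.DisableTfLiteRuntimeFallback(true);
//     DelegateOptionNoFallbackTest<float>(tflite::TensorType_FLOAT32,
//                                         tensorShape, input, expected, options);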

} // anonymous namespace