//
// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>
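
// Added for the usage sketches below (not part of the original include set).
#include <iostream>
#include <sstream>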

namespace
{

struct StreamRedirector
{
public:
    StreamRedirector(std::ostream& stream, std::streambuf* newStreamBuffer)
        : m_Stream(stream), m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer)) {}

    ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }

private:
    std::ostream& m_Stream;
    std::streambuf* m_BackupBuffer;
};
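
// A minimal usage sketch (an addition, not part of the original helpers):
// capture everything written to std::cout for the lifetime of the redirector,
// e.g. to assert on Arm NN log output produced while a test runs.
inline std::string ExampleCaptureStdOut()
{
    std::ostringstream captured;
    {
        StreamRedirector redirect(std::cout, captured.rdbuf());
        std::cout << "redirected";  // Written into 'captured', not the console.
    }                               // Destructor restores the original buffer.
    return captured.str();          // Returns "redirected".
}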

// Builds a two-operator TFLite model computing output = (input_0 + input_1) / input_2,
// where all tensors share the same shape, type and quantization parameters.
std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
                                          const std::vector<int32_t>& tensorShape,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input_0"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("input_1"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              3,
                              flatBufferBuilder.CreateString("input_2"),
                              quantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              4,
                              flatBufferBuilder.CreateString("add"),
                              quantizationParameters);
    tensors[4] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              5,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create the ADD and DIV operators.
    tflite::BuiltinOptions addBuiltinOptionsType = tflite::BuiltinOptions_AddOptions;
    flatbuffers::Offset<void> addBuiltinOptions =
        CreateAddOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();

    tflite::BuiltinOptions divBuiltinOptionsType = tflite::BuiltinOptions_DivOptions;
    flatbuffers::Offset<void> divBuiltinOptions =
        CreateDivOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();

    std::array<flatbuffers::Offset<Operator>, 2> operators;
    const std::vector<int32_t> addInputs{0, 1};
    const std::vector<int32_t> addOutputs{3};
    operators[0] = CreateOperator(flatBufferBuilder,
                                  0,
                                  flatBufferBuilder.CreateVector<int32_t>(addInputs.data(), addInputs.size()),
                                  flatBufferBuilder.CreateVector<int32_t>(addOutputs.data(), addOutputs.size()),
                                  addBuiltinOptionsType,
                                  addBuiltinOptions);
    const std::vector<int32_t> divInputs{3, 2};
    const std::vector<int32_t> divOutputs{4};
    operators[1] = CreateOperator(flatBufferBuilder,
                                  1,
                                  flatBufferBuilder.CreateVector<int32_t>(divInputs.data(), divInputs.size()),
                                  flatBufferBuilder.CreateVector<int32_t>(divOutputs.data(), divOutputs.size()),
                                  divBuiltinOptionsType,
                                  divBuiltinOptions);

    const std::vector<int32_t> subgraphInputs{0, 1, 2};
    const std::vector<int32_t> subgraphOutputs{4};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(operators.data(), operators.size()));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Add and Div Operator Model");

    std::array<flatbuffers::Offset<OperatorCode>, 2> codes;
    codes[0] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_ADD);
    codes[1] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_DIV);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(codes.data(), codes.size()),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
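
// A quick structural check (a sketch, not in the original file; assumes the
// generated tflite::VerifyModelBuffer helper is reachable via the includes
// above): confirm the returned bytes form a structurally valid TFLite model.
inline bool ExampleVerifyAddDivModel()
{
    std::vector<char> buffer = CreateAddDivTfLiteModel(tflite::TensorType_FLOAT32, { 1, 2, 2, 1 });
    flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t*>(buffer.data()), buffer.size());
    return tflite::VerifyModelBuffer(verifier);  // True if the flatbuffer verifies.
}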

// Builds a single-operator TFLite model computing output = cos(input); used by
// DelegateOptionNoFallbackTest below to exercise an operator the delegate is
// not expected to support.
std::vector<char> CreateCosTfLiteModel(tflite::TensorType tensorType,
                                       const std::vector<int32_t>& tensorShape,
                                       float quantScale = 1.0f,
                                       int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({quantScale}),
                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));

    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> cosOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       BuiltinOptions_NONE);

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: COS Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_COS);

    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&cosOperator, 1));

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

// Builds the Add/Div model, runs it once on the plain TFLite runtime and once
// with the Arm NN delegate applied using the given options, then checks that
// both runs produce the expected output values and shape.
template <typename T>
void DelegateOptionTest(tflite::TensorType tensorType,
                        std::vector<int32_t>& tensorShape,
                        std::vector<T>& input0Values,
                        std::vector<T>& input1Values,
                        std::vector<T>& input2Values,
                        std::vector<T>& expectedOutputValues,
                        const armnnDelegate::DelegateOptions& delegateOptions,
                        float quantScale = 1.0f,
                        int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
                                                            tensorShape,
                                                            quantScale,
                                                            quantOffset);

    // Set up an interpreter with just the TFLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
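
// A minimal usage sketch (hypothetical values, not a test from the original
// file): run DelegateOptionTest with float data on the CpuRef backend. The
// expected values follow from output = (input_0 + input_1) / input_2.
inline void ExampleDelegateOptionTest()
{
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> input0   { 1.f, 2.f, 3.f, 4.f };
    std::vector<float> input1   { 1.f, 2.f, 3.f, 4.f };
    std::vector<float> input2   { 2.f, 2.f, 2.f, 2.f };
    std::vector<float> expected { 1.f, 2.f, 3.f, 4.f };  // (x + x) / 2 == x

    armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuRef);
    DelegateOptionTest<float>(tflite::TensorType_FLOAT32, tensorShape,
                              input0, input1, input2, expected, delegateOptions);
}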

// Builds the Cos model and compares the plain TFLite runtime result against
// the Arm NN delegate. If the delegate cannot run the model and fallback is
// disabled, the armnn exception is caught and its message forwarded to
// std::cout.
template <typename T>
void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
                                  std::vector<int32_t>& tensorShape,
                                  std::vector<T>& inputValues,
                                  std::vector<T>& expectedOutputValues,
                                  const armnnDelegate::DelegateOptions& delegateOptions,
                                  float quantScale = 1.0f,
                                  int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateCosTfLiteModel(tensorType,
                                                         tensorShape,
                                                         quantScale,
                                                         quantOffset);

    // Set up an interpreter with just the TFLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
    tfLiteInterpreter.Cleanup();

    try
    {
        // Set up an interpreter with the Arm NN delegate applied.
        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
        std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
        armnnInterpreter.Cleanup();

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
    }
    catch (const armnn::Exception& e)
    {
        // Forward the exception message to std::cout.
        std::cout << e.what() << std::endl;
    }
}
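
// A minimal usage sketch (hypothetical values, not a test from the original
// file): run the Cos model through DelegateOptionNoFallbackTest on CpuRef,
// with expected values of cos(input) to six decimal places.
inline void ExampleDelegateOptionNoFallbackTest()
{
    std::vector<int32_t> tensorShape { 4 };
    std::vector<float> input    { 0.0f, 3.141593f, -3.141593f, 1.0f };
    std::vector<float> expected { 1.0f, -1.0f,     -1.0f,      0.540302f };

    armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuRef);
    DelegateOptionNoFallbackTest<float>(tflite::TensorType_FLOAT32, tensorShape,
                                        input, expected, delegateOptions);
}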

} // anonymous namespace