//
// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

#include <iostream>

namespace
{

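// RAII helper that swaps a stream's underlying buffer on construction and
// restores the original buffer on destruction. Tests use it to capture output
// (for example, Arm NN log messages) written to std::cout or std::cerr.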
struct StreamRedirector
{
public:
    StreamRedirector(std::ostream& stream, std::streambuf* newStreamBuffer)
        : m_Stream(stream), m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer)) {}

    ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }

private:
    std::ostream& m_Stream;
    std::streambuf* m_BackupBuffer;
};
| 34 | |
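// Builds a FlatBuffer TfLite model that computes (input_0 + input_1) / input_2:
// an ADD operator whose result feeds a DIV operator. All five tensors share the
// same shape, type and quantization parameters.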
std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
                                          const std::vector<int32_t>& tensorShape,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
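    // Buffer 0 is the empty sentinel buffer required by the TfLite schema; the
    // remaining five (also empty, as no tensor holds constant data) back
    // tensors 0-4 below.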
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

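    // Five tensors: the three subgraph inputs, the intermediate ADD result
    // ("add"), and the final DIV output ("output").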
    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input_0"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              2,
                              flatBufferBuilder.CreateString("input_1"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              3,
                              flatBufferBuilder.CreateString("input_2"),
                              quantizationParameters);
    tensors[3] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              4,
                              flatBufferBuilder.CreateString("add"),
                              quantizationParameters);
    tensors[4] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              5,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create the ADD and DIV operators.
    tflite::BuiltinOptions addBuiltinOptionsType = tflite::BuiltinOptions_AddOptions;
    flatbuffers::Offset<void> addBuiltinOptions =
        CreateAddOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();

    tflite::BuiltinOptions divBuiltinOptionsType = tflite::BuiltinOptions_DivOptions;
    flatbuffers::Offset<void> divBuiltinOptions =
        CreateDivOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();

    std::array<flatbuffers::Offset<Operator>, 2> operators;
    const std::vector<int32_t> addInputs{0, 1};
    const std::vector<int32_t> addOutputs{3};
    operators[0] = CreateOperator(flatBufferBuilder,
                                  0,
                                  flatBufferBuilder.CreateVector<int32_t>(addInputs.data(), addInputs.size()),
                                  flatBufferBuilder.CreateVector<int32_t>(addOutputs.data(), addOutputs.size()),
                                  addBuiltinOptionsType,
                                  addBuiltinOptions);
    const std::vector<int32_t> divInputs{3, 2};
    const std::vector<int32_t> divOutputs{4};
    operators[1] = CreateOperator(flatBufferBuilder,
                                  1,
                                  flatBufferBuilder.CreateVector<int32_t>(divInputs.data(), divInputs.size()),
                                  flatBufferBuilder.CreateVector<int32_t>(divOutputs.data(), divOutputs.size()),
                                  divBuiltinOptionsType,
                                  divBuiltinOptions);

    const std::vector<int32_t> subgraphInputs{0, 1, 2};
    const std::vector<int32_t> subgraphOutputs{4};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(operators.data(), operators.size()));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Add and Div Operator Model");

    std::array<flatbuffers::Offset<OperatorCode>, 2> codes;
    codes[0] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_ADD);
    codes[1] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_DIV);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(codes.data(), codes.size()),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
| 152 | |
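// Builds a FlatBuffer TfLite model containing a single COS operator. COS is
// used here as an operator the Arm NN delegate does not support, so that
// DelegateOptionNoFallbackTest below can exercise the delegate's failure path.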
std::vector<char> CreateCosTfLiteModel(tflite::TensorType tensorType,
                                       const std::vector<int32_t>& tensorShape,
                                       float quantScale = 1.0f,
                                       int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({quantScale}),
                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));

    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> cosOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       BuiltinOptions_NONE);

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: COS Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_COS);

    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&cosOperator, 1));

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
| 223 | |
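// Runs the Add/Div model twice - once on the plain TfLite runtime and once
// with the Arm NN delegate applied using the given DelegateOptions - and
// checks that both runs produce the expected output values and shape.
//
// A minimal usage sketch (the option and tensor values are illustrative only,
// not taken from any specific test case):
//
//     armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuRef);
//     std::vector<int32_t> tensorShape { 2, 2 };
//     std::vector<float> input0 { 1.f, 2.f, 3.f, 4.f };
//     std::vector<float> input1 { 1.f, 2.f, 3.f, 4.f };
//     std::vector<float> input2 { 1.f, 1.f, 1.f, 1.f };
//     std::vector<float> expected { 2.f, 4.f, 6.f, 8.f };
//     DelegateOptionTest<float>(tflite::TensorType_FLOAT32, tensorShape, input0,
//                               input1, input2, expected, delegateOptions);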
template <typename T>
void DelegateOptionTest(tflite::TensorType tensorType,
                        std::vector<int32_t>& tensorShape,
                        std::vector<T>& input0Values,
                        std::vector<T>& input1Values,
                        std::vector<T>& input2Values,
                        std::vector<T>& expectedOutputValues,
                        const armnnDelegate::DelegateOptions& delegateOptions,
                        float quantScale = 1.0f,
                        int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
                                                            tensorShape,
                                                            quantScale,
                                                            quantOffset);

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Setup interpreter with Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
| 267 | |
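// Runs the COS model on the plain TfLite runtime, then attempts the same run
// with the Arm NN delegate applied. When delegateOptions disables fallback to
// the TfLite runtime, applying the delegate to the unsupported COS operator is
// expected to throw an armnn::Exception, which is caught and logged below.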
template <typename T>
void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
                                  std::vector<int32_t>& tensorShape,
                                  std::vector<T>& inputValues,
                                  std::vector<T>& expectedOutputValues,
                                  const armnnDelegate::DelegateOptions& delegateOptions,
                                  float quantScale = 1.0f,
                                  int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateCosTfLiteModel(tensorType,
                                                         tensorShape,
                                                         quantScale,
                                                         quantOffset);

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
    tfLiteInterpreter.Cleanup();

    try
    {
        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
        std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
        armnnInterpreter.Cleanup();

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
    }
    catch (const armnn::Exception& e)
    {
        // Forward the exception message to std::cout
        std::cout << e.what() << std::endl;
    }
}

} // anonymous namespace