//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{
22std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator binaryOperatorCode,
23 tflite::ActivationFunctionType activationType,
24 tflite::TensorType tensorType,
25 const std::vector <int32_t>& input0TensorShape,
26 const std::vector <int32_t>& input1TensorShape,
Sadik Armagan21a94ff2020-11-09 08:38:30 +000027 const std::vector <int32_t>& outputTensorShape,
28 float quantScale = 1.0f,
29 int quantOffset = 0)
Sadik Armagan67e95f22020-10-29 16:14:54 +000030{
31 using namespace tflite;
32 flatbuffers::FlatBufferBuilder flatBufferBuilder;
33
34 std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
35 buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
36
Sadik Armagan21a94ff2020-11-09 08:38:30 +000037 auto quantizationParameters =
38 CreateQuantizationParameters(flatBufferBuilder,
39 0,
40 0,
41 flatBufferBuilder.CreateVector<float>({ quantScale }),
42 flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
43
44
Sadik Armagan67e95f22020-10-29 16:14:54 +000045 std::array<flatbuffers::Offset<Tensor>, 3> tensors;
46 tensors[0] = CreateTensor(flatBufferBuilder,
47 flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
48 input0TensorShape.size()),
Sadik Armagan21a94ff2020-11-09 08:38:30 +000049 tensorType,
50 0,
51 flatBufferBuilder.CreateString("input_0"),
52 quantizationParameters);
Sadik Armagan67e95f22020-10-29 16:14:54 +000053 tensors[1] = CreateTensor(flatBufferBuilder,
54 flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
55 input1TensorShape.size()),
Sadik Armagan21a94ff2020-11-09 08:38:30 +000056 tensorType,
57 0,
58 flatBufferBuilder.CreateString("input_1"),
59 quantizationParameters);
Sadik Armagan67e95f22020-10-29 16:14:54 +000060 tensors[2] = CreateTensor(flatBufferBuilder,
61 flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
62 outputTensorShape.size()),
Sadik Armagan21a94ff2020-11-09 08:38:30 +000063 tensorType,
64 0,
65 flatBufferBuilder.CreateString("output"),
66 quantizationParameters);
Sadik Armagan67e95f22020-10-29 16:14:54 +000067
68 // create operator
69 tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
70 flatbuffers::Offset<void> operatorBuiltinOptions = 0;
71 switch (binaryOperatorCode)
72 {
73 case BuiltinOperator_ADD:
74 {
75 operatorBuiltinOptionsType = BuiltinOptions_AddOptions;
76 operatorBuiltinOptions = CreateAddOptions(flatBufferBuilder, activationType).Union();
77 break;
78 }
79 case BuiltinOperator_DIV:
80 {
81 operatorBuiltinOptionsType = BuiltinOptions_DivOptions;
82 operatorBuiltinOptions = CreateDivOptions(flatBufferBuilder, activationType).Union();
83 break;
84 }
Sadik Armagan21a94ff2020-11-09 08:38:30 +000085 case BuiltinOperator_MAXIMUM:
86 {
87 operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
88 operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
89 break;
90 }
91 case BuiltinOperator_MINIMUM:
92 {
93 operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
94 operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
95 break;
96 }
Sadik Armagan67e95f22020-10-29 16:14:54 +000097 case BuiltinOperator_MUL:
98 {
99 operatorBuiltinOptionsType = BuiltinOptions_MulOptions;
100 operatorBuiltinOptions = CreateMulOptions(flatBufferBuilder, activationType).Union();
101 break;
102 }
103 case BuiltinOperator_SUB:
104 {
105 operatorBuiltinOptionsType = BuiltinOptions_SubOptions;
106 operatorBuiltinOptions = CreateSubOptions(flatBufferBuilder, activationType).Union();
107 break;
108 }
109 default:
110 break;
111 }
112 const std::vector<int32_t> operatorInputs{ {0, 1} };
113 const std::vector<int32_t> operatorOutputs{{2}};
114 flatbuffers::Offset <Operator> elementwiseBinaryOperator =
115 CreateOperator(flatBufferBuilder,
116 0,
117 flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
118 flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
119 operatorBuiltinOptionsType,
120 operatorBuiltinOptions);
121
122 const std::vector<int> subgraphInputs{ {0, 1} };
123 const std::vector<int> subgraphOutputs{{2}};
124 flatbuffers::Offset <SubGraph> subgraph =
125 CreateSubGraph(flatBufferBuilder,
126 flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
127 flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
128 flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
129 flatBufferBuilder.CreateVector(&elementwiseBinaryOperator, 1));
130
131 flatbuffers::Offset <flatbuffers::String> modelDescription =
132 flatBufferBuilder.CreateString("ArmnnDelegate: Elementwise Binary Operator Model");
133 flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, binaryOperatorCode);
134
135 flatbuffers::Offset <Model> flatbufferModel =
136 CreateModel(flatBufferBuilder,
137 TFLITE_SCHEMA_VERSION,
138 flatBufferBuilder.CreateVector(&operatorCode, 1),
139 flatBufferBuilder.CreateVector(&subgraph, 1),
140 modelDescription,
141 flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
142
143 flatBufferBuilder.Finish(flatbufferModel);
144
145 return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
146 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
147}
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000149template <typename T>
150void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode,
151 tflite::ActivationFunctionType activationType,
152 tflite::TensorType tensorType,
153 std::vector<armnn::BackendId>& backends,
154 std::vector<int32_t>& input0Shape,
155 std::vector<int32_t>& input1Shape,
156 std::vector<int32_t>& outputShape,
157 std::vector<T>& input0Values,
158 std::vector<T>& input1Values,
159 std::vector<T>& expectedOutputValues,
160 float quantScale = 1.0f,
161 int quantOffset = 0)
Sadik Armagan67e95f22020-10-29 16:14:54 +0000162{
163 using namespace tflite;
164 std::vector<char> modelBuffer = CreateElementwiseBinaryTfLiteModel(binaryOperatorCode,
165 activationType,
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000166 tensorType,
167 input0Shape,
168 input1Shape,
169 outputShape,
170 quantScale,
171 quantOffset);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000172
173 const Model* tfLiteModel = GetModel(modelBuffer.data());
174 // Create TfLite Interpreters
175 std::unique_ptr<Interpreter> armnnDelegateInterpreter;
176 CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
177 (&armnnDelegateInterpreter) == kTfLiteOk);
178 CHECK(armnnDelegateInterpreter != nullptr);
179 CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
180
181 std::unique_ptr<Interpreter> tfLiteInterpreter;
182 CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
183 (&tfLiteInterpreter) == kTfLiteOk);
184 CHECK(tfLiteInterpreter != nullptr);
185 CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
186
187 // Create the ArmNN Delegate
188 armnnDelegate::DelegateOptions delegateOptions(backends);
189 std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
190 theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
191 armnnDelegate::TfLiteArmnnDelegateDelete);
192 CHECK(theArmnnDelegate != nullptr);
193 // Modify armnnDelegateInterpreter to use armnnDelegate
194 CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
195
196 // Set input data
197 auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000198 auto tfLiteDelageInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000199 for (unsigned int i = 0; i < input0Values.size(); ++i)
200 {
201 tfLiteDelageInput0Data[i] = input0Values[i];
202 }
203
204 auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000205 auto tfLiteDelageInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000206 for (unsigned int i = 0; i < input1Values.size(); ++i)
207 {
208 tfLiteDelageInput1Data[i] = input1Values[i];
209 }
210
211 auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000212 auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000213 for (unsigned int i = 0; i < input0Values.size(); ++i)
214 {
215 armnnDelegateInput0Data[i] = input0Values[i];
216 }
217
218 auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000219 auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000220 for (unsigned int i = 0; i < input1Values.size(); ++i)
221 {
222 armnnDelegateInput1Data[i] = input1Values[i];
223 }
224
225 // Run EnqueWorkload
226 CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
227 CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
228
229 // Compare output data
230 auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000231 auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000232 auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000233 auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000234 for (size_t i = 0; i < expectedOutputValues.size(); i++)
235 {
236 CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
237 CHECK(tfLiteDelageOutputData[i] == expectedOutputValues[i]);
238 CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]);
239 }
240
241 armnnDelegateInterpreter.reset(nullptr);
242}

} // anonymous namespace