//
// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

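// Builds a single-operator TfLite FlatBuffer model containing either an L2_NORMALIZATION or a
// LOCAL_RESPONSE_NORMALIZATION operator with one input and one output tensor.
// The radius/bias/alpha/beta parameters are only used for LOCAL_RESPONSE_NORMALIZATION.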
std::vector<char> CreateNormalizationTfLiteModel(tflite::BuiltinOperator normalizationOperatorCode,
                                                 tflite::TensorType tensorType,
                                                 const std::vector<int32_t>& inputTensorShape,
                                                 const std::vector<int32_t>& outputTensorShape,
                                                 int32_t radius,
                                                 float bias,
                                                 float alpha,
                                                 float beta,
                                                 float quantScale = 1.0f,
                                                 int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    auto inputTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                            inputTensorShape.size()),
                                    tensorType,
                                    1,
                                    flatBufferBuilder.CreateString("input"),
                                    quantizationParameters);

    auto outputTensor = CreateTensor(flatBufferBuilder,
                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                             outputTensorShape.size()),
                                     tensorType,
                                     2,
                                     flatBufferBuilder.CreateString("output"),
                                     quantizationParameters);

    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, outputTensor };

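    // By TfLite convention, buffer 0 is an empty sentinel; the input and output tensors
    // above reference buffers 1 and 2.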
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    std::vector<int32_t> operatorInputs = { 0 };
    std::vector<int> subgraphInputs = { 0 };

    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_L2NormOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateL2NormOptions(flatBufferBuilder,
                                                                           tflite::ActivationFunctionType_NONE).Union();

    if (normalizationOperatorCode == tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION)
    {
        operatorBuiltinOptionsType = BuiltinOptions_LocalResponseNormalizationOptions;
        operatorBuiltinOptions =
            CreateLocalResponseNormalizationOptions(flatBufferBuilder, radius, bias, alpha, beta).Union();
    }

    // create operator
    const std::vector<int32_t> operatorOutputs{ 1 };
    flatbuffers::Offset<Operator> normalizationOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    const std::vector<int> subgraphOutputs{ 1 };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&normalizationOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Normalization Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                        normalizationOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

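// Builds the normalization model above, runs it both through a reference TfLite interpreter and
// through an interpreter using the Arm NN delegate on the given backends, and checks that both
// produce the expected output values.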
template <typename T>
void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode,
                       tflite::TensorType tensorType,
                       const std::vector<armnn::BackendId>& backends,
                       const std::vector<int32_t>& inputShape,
                       std::vector<int32_t>& outputShape,
                       std::vector<T>& inputValues,
                       std::vector<T>& expectedOutputValues,
                       int32_t radius = 0,
                       float bias = 0.f,
                       float alpha = 0.f,
                       float beta = 0.f,
                       float quantScale = 1.0f,
                       int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateNormalizationTfLiteModel(normalizationOperatorCode,
                                                                   tensorType,
                                                                   inputShape,
                                                                   outputShape,
                                                                   radius,
                                                                   bias,
                                                                   alpha,
                                                                   beta,
                                                                   quantScale,
                                                                   quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());
    CHECK(tfLiteModel != nullptr);

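    // Set up the interpreter that will be modified to use the Arm NN delegate.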
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

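    // Set up a reference interpreter that stays on the default TfLite runtime.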
    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the Arm NN delegate.
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use the Arm NN delegate.
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data on both interpreters.
    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);

    // Run inference on both interpreters.
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare the delegate output against the reference and the expected values.
    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
}

void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> inputShape { 1, 1, 1, 10 };
    std::vector<int32_t> outputShape { 1, 1, 1, 10 };

    std::vector<float> inputValues
    {
        1.0f,
        2.0f,
        3.0f,
        4.0f,
        5.0f,
        6.0f,
        7.0f,
        8.0f,
        9.0f,
        10.0f
    };

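    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385)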
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    NormalizationTest<float>(tflite::BuiltinOperator_L2_NORMALIZATION,
                             ::tflite::TensorType_FLOAT32,
                             backends,
                             inputShape,
                             outputShape,
                             inputValues,
                             expectedOutputValues);
}

void LocalResponseNormalizationTest(std::vector<armnn::BackendId>& backends,
                                    int32_t radius,
                                    float bias,
                                    float alpha,
                                    float beta)
{
    // Set input data
    std::vector<int32_t> inputShape { 2, 2, 2, 1 };
    std::vector<int32_t> outputShape { 2, 2, 2, 1 };

    std::vector<float> inputValues
    {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f
    };

    std::vector<float> expectedOutputValues
    {
        0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
        0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f
    };

    NormalizationTest<float>(tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
                             ::tflite::TensorType_FLOAT32,
                             backends,
                             inputShape,
                             outputShape,
                             inputValues,
                             expectedOutputValues,
                             radius,
                             bias,
                             alpha,
                             beta);
}

} // anonymous namespace