blob: 59d2e182280f7b0120a526a435e69636493b0aeb [file] [log] [blame]
//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#pragma once
7
8#include "TestUtils.hpp"
9
10#include <armnn_delegate.hpp>
Matthew Sloyanebe392d2023-03-30 10:12:08 +010011#include <DelegateTestInterpreter.hpp>
Ryan OShead21abaf2022-06-10 14:49:11 +010012
13#include <flatbuffers/flatbuffers.h>
14#include <flatbuffers/flexbuffers.h>
Ryan OShead21abaf2022-06-10 14:49:11 +010015#include <tensorflow/lite/kernels/register.h>
Matthew Sloyanebe392d2023-03-30 10:12:08 +010016#include <tensorflow/lite/kernels/custom_ops_register.h>
Ryan OShead21abaf2022-06-10 14:49:11 +010017#include <tensorflow/lite/version.h>
18
Matthew Sloyanebe392d2023-03-30 10:12:08 +010019#include <schema_generated.h>
20
Ryan OShead21abaf2022-06-10 14:49:11 +010021#include <doctest/doctest.h>
22
23namespace
24{
25#if defined(ARMNN_POST_TFLITE_2_5)
26
27std::vector<uint8_t> CreateCustomOptions(int, int, int, int, int, int, TfLitePadding);
28
29std::vector<char> CreatePooling3dTfLiteModel(
30 std::string poolType,
31 tflite::TensorType tensorType,
32 const std::vector<int32_t>& inputTensorShape,
33 const std::vector<int32_t>& outputTensorShape,
34 TfLitePadding padding = kTfLitePaddingSame,
35 int32_t strideWidth = 0,
36 int32_t strideHeight = 0,
37 int32_t strideDepth = 0,
38 int32_t filterWidth = 0,
39 int32_t filterHeight = 0,
40 int32_t filterDepth = 0,
41 tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
42 float quantScale = 1.0f,
43 int quantOffset = 0)
44{
45 using namespace tflite;
46 flatbuffers::FlatBufferBuilder flatBufferBuilder;
47
48 std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
Ryan OShea238ecd92023-03-07 11:44:23 +000049 buffers.push_back(CreateBuffer(flatBufferBuilder));
50 buffers.push_back(CreateBuffer(flatBufferBuilder));
51 buffers.push_back(CreateBuffer(flatBufferBuilder));
52
Ryan OShead21abaf2022-06-10 14:49:11 +010053
54 auto quantizationParameters =
55 CreateQuantizationParameters(flatBufferBuilder,
56 0,
57 0,
58 flatBufferBuilder.CreateVector<float>({ quantScale }),
59 flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
60
61 // Create the input and output tensors
62 std::array<flatbuffers::Offset<Tensor>, 2> tensors;
63 tensors[0] = CreateTensor(flatBufferBuilder,
64 flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
65 inputTensorShape.size()),
66 tensorType,
67 0,
68 flatBufferBuilder.CreateString("input"),
69 quantizationParameters);
70
71 tensors[1] = CreateTensor(flatBufferBuilder,
72 flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
73 outputTensorShape.size()),
74 tensorType,
75 0,
76 flatBufferBuilder.CreateString("output"),
77 quantizationParameters);
78
79 // Create the custom options from the function below
80 std::vector<uint8_t> customOperatorOptions = CreateCustomOptions(strideHeight, strideWidth, strideDepth,
81 filterHeight, filterWidth, filterDepth, padding);
82 // opCodeIndex is created as a uint8_t to avoid map lookup
83 uint8_t opCodeIndex = 0;
84 // Set the operator name based on the PoolType passed in from the test case
85 std::string opName = "";
86 if (poolType == "kMax")
87 {
88 opName = "MaxPool3D";
89 }
90 else
91 {
92 opName = "AveragePool3D";
93 }
94 // To create a custom operator code you pass in the builtin code for custom operators and the name of the custom op
95 flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCodeDirect(flatBufferBuilder,
96 tflite::BuiltinOperator_CUSTOM,
97 opName.c_str());
98
99 // Create the Operator using the opCodeIndex and custom options. Also sets builtin options to none.
100 const std::vector<int32_t> operatorInputs{ 0 };
101 const std::vector<int32_t> operatorOutputs{ 1 };
102 flatbuffers::Offset<Operator> poolingOperator =
103 CreateOperator(flatBufferBuilder,
104 opCodeIndex,
105 flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
106 flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
107 tflite::BuiltinOptions_NONE,
108 0,
109 flatBufferBuilder.CreateVector<uint8_t>(customOperatorOptions),
110 tflite::CustomOptionsFormat_FLEXBUFFERS);
111
112 // Create the subgraph using the operator created above.
113 const std::vector<int> subgraphInputs{ 0 };
114 const std::vector<int> subgraphOutputs{ 1 };
115 flatbuffers::Offset<SubGraph> subgraph =
116 CreateSubGraph(flatBufferBuilder,
117 flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
118 flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
119 flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
120 flatBufferBuilder.CreateVector(&poolingOperator, 1));
121
122 flatbuffers::Offset<flatbuffers::String> modelDescription =
123 flatBufferBuilder.CreateString("ArmnnDelegate: Pooling3d Operator Model");
124
125 // Create the model using operatorCode and the subgraph.
126 flatbuffers::Offset<Model> flatbufferModel =
127 CreateModel(flatBufferBuilder,
128 TFLITE_SCHEMA_VERSION,
129 flatBufferBuilder.CreateVector(&operatorCode, 1),
130 flatBufferBuilder.CreateVector(&subgraph, 1),
131 modelDescription,
132 flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
133
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100134 flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
Ryan OShead21abaf2022-06-10 14:49:11 +0100135
136 return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
137 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
138}
139
140template<typename T>
141void Pooling3dTest(std::string poolType,
142 tflite::TensorType tensorType,
143 std::vector<armnn::BackendId>& backends,
144 std::vector<int32_t>& inputShape,
145 std::vector<int32_t>& outputShape,
146 std::vector<T>& inputValues,
147 std::vector<T>& expectedOutputValues,
148 TfLitePadding padding = kTfLitePaddingSame,
149 int32_t strideWidth = 0,
150 int32_t strideHeight = 0,
151 int32_t strideDepth = 0,
152 int32_t filterWidth = 0,
153 int32_t filterHeight = 0,
154 int32_t filterDepth = 0,
155 tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
156 float quantScale = 1.0f,
157 int quantOffset = 0)
158{
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100159 using namespace delegateTestInterpreter;
Ryan OShead21abaf2022-06-10 14:49:11 +0100160 // Create the single op model buffer
161 std::vector<char> modelBuffer = CreatePooling3dTfLiteModel(poolType,
162 tensorType,
163 inputShape,
164 outputShape,
165 padding,
166 strideWidth,
167 strideHeight,
168 strideDepth,
169 filterWidth,
170 filterHeight,
171 filterDepth,
172 fusedActivation,
173 quantScale,
174 quantOffset);
175
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100176 std::string opType = "";
Ryan OShead21abaf2022-06-10 14:49:11 +0100177 if (poolType == "kMax")
178 {
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100179 opType = "MaxPool3D";
Ryan OShead21abaf2022-06-10 14:49:11 +0100180 }
181 else
182 {
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100183 opType = "AveragePool3D";
Ryan OShead21abaf2022-06-10 14:49:11 +0100184 }
185
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100186 // Setup interpreter with just TFLite Runtime.
187 auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer, opType);
188 CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
189 CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
190 CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
191 std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
192 std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
Ryan OShead21abaf2022-06-10 14:49:11 +0100193
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100194 // Setup interpreter with Arm NN Delegate applied.
195 auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends, opType);
196 CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
197 CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
198 CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
199 std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
200 std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
Ryan OShead21abaf2022-06-10 14:49:11 +0100201
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100202 armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
203 armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
Ryan OShead21abaf2022-06-10 14:49:11 +0100204
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100205 tfLiteInterpreter.Cleanup();
206 armnnInterpreter.Cleanup();
Ryan OShead21abaf2022-06-10 14:49:11 +0100207}
208
209// Function to create the flexbuffer custom options for the custom pooling3d operator.
210std::vector<uint8_t> CreateCustomOptions(int strideHeight, int strideWidth, int strideDepth,
211 int filterHeight, int filterWidth, int filterDepth, TfLitePadding padding)
212{
213 auto flex_builder = std::make_unique<flexbuffers::Builder>();
214 size_t map_start = flex_builder->StartMap();
215 flex_builder->String("data_format", "NDHWC");
216 // Padding is created as a key and padding type. Only VALID and SAME supported
217 if (padding == kTfLitePaddingValid)
218 {
219 flex_builder->String("padding", "VALID");
220 }
221 else
222 {
223 flex_builder->String("padding", "SAME");
224 }
225
226 // Vector of filter dimensions in order ( 1, Depth, Height, Width, 1 )
227 auto start = flex_builder->StartVector("ksize");
228 flex_builder->Add(1);
229 flex_builder->Add(filterDepth);
230 flex_builder->Add(filterHeight);
231 flex_builder->Add(filterWidth);
232 flex_builder->Add(1);
233 // EndVector( start, bool typed, bool fixed)
234 flex_builder->EndVector(start, true, false);
235
236 // Vector of stride dimensions in order ( 1, Depth, Height, Width, 1 )
237 auto stridesStart = flex_builder->StartVector("strides");
238 flex_builder->Add(1);
239 flex_builder->Add(strideDepth);
240 flex_builder->Add(strideHeight);
241 flex_builder->Add(strideWidth);
242 flex_builder->Add(1);
243 // EndVector( stridesStart, bool typed, bool fixed)
244 flex_builder->EndVector(stridesStart, true, false);
245
246 flex_builder->EndMap(map_start);
247 flex_builder->Finish();
248
249 return flex_builder->GetBuffer();
250}
251#endif
252} // anonymous namespace
253
254
255
256