//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

template <typename T>
std::vector<char> CreatePadTfLiteModel(
    tflite::BuiltinOperator padOperatorCode,
    tflite::TensorType tensorType,
    const std::vector<int32_t>& inputTensorShape,
    const std::vector<int32_t>& paddingTensorShape,
    const std::vector<int32_t>& outputTensorShape,
    const std::vector<int32_t>& paddingDim,
    const std::vector<T>& paddingValue,
    float quantScale = 1.0f,
    int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

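    // Quantization parameters are shared by the input, output and (for PADV2) pad-value
    // tensors; the defaults of scale 1.0 / offset 0 leave float runs unaffected.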
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

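    // The model holds three tensors: the data input (buffer 0), the constant paddings
    // (buffer 1) and the output (buffer 2). The paddings tensor is always INT32,
    // independent of tensorType.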
    auto inputTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                            inputTensorShape.size()),
                                    tensorType,
                                    0,
                                    flatBufferBuilder.CreateString("input"),
                                    quantizationParameters);

    auto paddingTensor = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(paddingTensorShape.data(),
                                                                              paddingTensorShape.size()),
                                      tflite::TensorType_INT32,
                                      1,
                                      flatBufferBuilder.CreateString("padding"));

    auto outputTensor = CreateTensor(flatBufferBuilder,
                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                             outputTensorShape.size()),
                                     tensorType,
                                     2,
                                     flatBufferBuilder.CreateString("output"),
                                     quantizationParameters);

    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, paddingTensor, outputTensor };

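    // Buffers 0 and 2 stay empty (input/output data is supplied at runtime);
    // buffer 1 carries the constant padding dimensions as raw bytes.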
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
    buffers.push_back(
        CreateBuffer(flatBufferBuilder,
                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
                                                    sizeof(int32_t) * paddingDim.size())));
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

    std::vector<int32_t> operatorInputs;
    std::vector<int32_t> subgraphInputs;

    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_PadOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions;

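    // PAD takes two inputs (data, paddings) and implicitly pads with zero;
    // PADV2 takes a third input holding an explicit pad value.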
    if (padOperatorCode == tflite::BuiltinOperator_PAD)
    {
        operatorInputs = {{ 0, 1 }};
        subgraphInputs = {{ 0, 1 }};
        operatorBuiltinOptions = CreatePadOptions(flatBufferBuilder).Union();
    }
    else if (padOperatorCode == tflite::BuiltinOperator_PADV2)
    {
        buffers.push_back(
            CreateBuffer(flatBufferBuilder,
                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingValue.data()),
                                                        sizeof(T))));

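        // Scalar pad-value tensor of shape { 1 }, backed by buffer 3 and
        // wired in as the operator's third input.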
        const std::vector<int32_t> shape = { 1 };
        auto padValueTensor = CreateTensor(flatBufferBuilder,
                                           flatBufferBuilder.CreateVector<int32_t>(shape.data(),
                                                                                   shape.size()),
                                           tensorType,
                                           3,
                                           flatBufferBuilder.CreateString("paddingValue"),
                                           quantizationParameters);

        tensors.push_back(padValueTensor);

        operatorInputs = {{ 0, 1, 3 }};
        subgraphInputs = {{ 0, 1, 3 }};

        operatorBuiltinOptionsType = BuiltinOptions_PadV2Options;
        operatorBuiltinOptions = CreatePadV2Options(flatBufferBuilder).Union();
    }

    // Create the single pad operator.
    const std::vector<int32_t> operatorOutputs{{ 2 }};
    flatbuffers::Offset<Operator> padOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

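    // A single subgraph wraps the operator; its inputs and outputs mirror the operator's.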
    const std::vector<int32_t> subgraphOutputs{{ 2 }};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&padOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Pad Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, padOperatorCode);

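    // Assemble the model and serialise it into the builder's buffer.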
    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

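// Builds a PAD/PADV2 model with CreatePadTfLiteModel, runs it on both the plain
// TfLite runtime and the ArmNN delegate, and checks that both produce the expected
// output. paddingDim holds one (before, after) pair per input dimension, flattened
// row-major, matching the paddingShape tensor.
//
// Illustrative call (hypothetical shapes and values, not taken from a real test):
//
//     std::vector<int32_t> outputShape          { 4, 4 };
//     std::vector<float>   inputValues          { 1.f, 2.f, 3.f, 4.f };   // 2x2 input
//     std::vector<int32_t> paddingDim           { 1, 1, 1, 1 };           // pad 1 before/after each dim
//     std::vector<float>   expectedOutputValues { 0.f, 0.f, 0.f, 0.f,
//                                                 0.f, 1.f, 2.f, 0.f,
//                                                 0.f, 3.f, 4.f, 0.f,
//                                                 0.f, 0.f, 0.f, 0.f };
//     PadTest<float>(tflite::BuiltinOperator_PAD, tflite::TensorType_FLOAT32,
//                    { armnn::Compute::CpuRef }, { 2, 2 }, { 2, 2 },
//                    outputShape, inputValues, paddingDim, expectedOutputValues, 0.0f);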
template <typename T>
void PadTest(tflite::BuiltinOperator padOperatorCode,
             tflite::TensorType tensorType,
             const std::vector<armnn::BackendId>& backends,
             const std::vector<int32_t>& inputShape,
             const std::vector<int32_t>& paddingShape,
             std::vector<int32_t>& outputShape,
             std::vector<T>& inputValues,
             std::vector<int32_t>& paddingDim,
             std::vector<T>& expectedOutputValues,
             T paddingValue,
             float quantScale = 1.0f,
             int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreatePadTfLiteModel<T>(padOperatorCode,
                                                            tensorType,
                                                            inputShape,
                                                            paddingShape,
                                                            outputShape,
                                                            paddingDim,
                                                            { paddingValue },
                                                            quantScale,
                                                            quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());
    CHECK(tfLiteModel != nullptr);

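    // Build two interpreters from the same flatbuffer: one will run with the ArmNN
    // delegate attached, the other is the reference TfLite CPU run to compare against.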
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN delegate for the requested backends.
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Hand the graph over to the ArmNN delegate.
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set the same input data on both interpreters.
    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);

    // Run inference on both interpreters.
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

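    // The two runs must agree element-wise and match the expected values.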
    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
                                        armnnDelegateInterpreter,
                                        outputShape,
                                        expectedOutputValues);
}

} // anonymous namespace