blob: f5434740f3cb6482554cd8f29d80a8f014c5c611 [file] [log] [blame]
//
// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#pragma once
7
8#include "TestUtils.hpp"
9
10#include <armnn_delegate.hpp>
Matthew Sloyanebe392d2023-03-30 10:12:08 +010011#include <DelegateTestInterpreter.hpp>
Narumol Prangnawarat958024b2020-12-17 12:17:58 +000012
Narumol Prangnawarat958024b2020-12-17 12:17:58 +000013#include <tensorflow/lite/version.h>
14
Narumol Prangnawarat958024b2020-12-17 12:17:58 +000015namespace
16{
17
18template <typename T>
19std::vector<char> CreatePadTfLiteModel(
20 tflite::BuiltinOperator padOperatorCode,
21 tflite::TensorType tensorType,
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +010022 tflite::MirrorPadMode paddingMode,
Narumol Prangnawarat958024b2020-12-17 12:17:58 +000023 const std::vector<int32_t>& inputTensorShape,
24 const std::vector<int32_t>& paddingTensorShape,
25 const std::vector<int32_t>& outputTensorShape,
26 const std::vector<int32_t>& paddingDim,
27 const std::vector<T> paddingValue,
28 float quantScale = 1.0f,
29 int quantOffset = 0)
30{
31 using namespace tflite;
32 flatbuffers::FlatBufferBuilder flatBufferBuilder;
33
34 auto quantizationParameters =
35 CreateQuantizationParameters(flatBufferBuilder,
36 0,
37 0,
38 flatBufferBuilder.CreateVector<float>({ quantScale }),
39 flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
40
41 auto inputTensor = CreateTensor(flatBufferBuilder,
42 flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
43 inputTensorShape.size()),
44 tensorType,
45 0,
46 flatBufferBuilder.CreateString("input"),
47 quantizationParameters);
48
49 auto paddingTensor = CreateTensor(flatBufferBuilder,
50 flatBufferBuilder.CreateVector<int32_t>(paddingTensorShape.data(),
51 paddingTensorShape.size()),
52 tflite::TensorType_INT32,
53 1,
54 flatBufferBuilder.CreateString("padding"));
55
56 auto outputTensor = CreateTensor(flatBufferBuilder,
57 flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
58 outputTensorShape.size()),
59 tensorType,
60 2,
61 flatBufferBuilder.CreateString("output"),
62 quantizationParameters);
63
64 std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, paddingTensor, outputTensor};
65
66 std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
Ryan OShea238ecd92023-03-07 11:44:23 +000067 buffers.push_back(CreateBuffer(flatBufferBuilder));
Narumol Prangnawarat958024b2020-12-17 12:17:58 +000068 buffers.push_back(
69 CreateBuffer(flatBufferBuilder,
70 flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
Narumol Prangnawarat4cf0fe32020-12-18 16:13:06 +000071 sizeof(int32_t) * paddingDim.size())));
Ryan OShea238ecd92023-03-07 11:44:23 +000072 buffers.push_back(CreateBuffer(flatBufferBuilder));
Narumol Prangnawarat958024b2020-12-17 12:17:58 +000073
74 std::vector<int32_t> operatorInputs;
75 std::vector<int> subgraphInputs;
76
77 tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_PadOptions;
78 flatbuffers::Offset<void> operatorBuiltinOptions;
79
80 if (padOperatorCode == tflite::BuiltinOperator_PAD)
81 {
82 operatorInputs = {{ 0, 1 }};
83 subgraphInputs = {{ 0, 1 }};
84 operatorBuiltinOptions = CreatePadOptions(flatBufferBuilder).Union();
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +010085 }
86 else if(padOperatorCode == tflite::BuiltinOperator_MIRROR_PAD)
87 {
88 operatorInputs = {{ 0, 1 }};
89 subgraphInputs = {{ 0, 1 }};
Narumol Prangnawarat958024b2020-12-17 12:17:58 +000090
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +010091 operatorBuiltinOptionsType = BuiltinOptions_MirrorPadOptions;
92 operatorBuiltinOptions = CreateMirrorPadOptions(flatBufferBuilder, paddingMode).Union();
Narumol Prangnawarat958024b2020-12-17 12:17:58 +000093 }
94 else if (padOperatorCode == tflite::BuiltinOperator_PADV2)
95 {
96 buffers.push_back(
97 CreateBuffer(flatBufferBuilder,
98 flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingValue.data()),
99 sizeof(T))));
100
101 const std::vector<int32_t> shape = { 1 };
102 auto padValueTensor = CreateTensor(flatBufferBuilder,
103 flatBufferBuilder.CreateVector<int32_t>(shape.data(),
104 shape.size()),
105 tensorType,
106 3,
107 flatBufferBuilder.CreateString("paddingValue"),
108 quantizationParameters);
109
110 tensors.push_back(padValueTensor);
111
112 operatorInputs = {{ 0, 1, 3 }};
113 subgraphInputs = {{ 0, 1, 3 }};
114
115 operatorBuiltinOptionsType = BuiltinOptions_PadV2Options;
116 operatorBuiltinOptions = CreatePadV2Options(flatBufferBuilder).Union();
117 }
118
119 // create operator
Keith Davisbbc876c2021-01-27 13:12:03 +0000120 const std::vector<int32_t> operatorOutputs{ 2 };
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +0100121 flatbuffers::Offset <Operator> paddingOperator =
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000122 CreateOperator(flatBufferBuilder,
123 0,
124 flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
125 flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
126 operatorBuiltinOptionsType,
127 operatorBuiltinOptions);
128
Keith Davisbbc876c2021-01-27 13:12:03 +0000129 const std::vector<int> subgraphOutputs{ 2 };
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000130 flatbuffers::Offset <SubGraph> subgraph =
131 CreateSubGraph(flatBufferBuilder,
132 flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
133 flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
134 flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +0100135 flatBufferBuilder.CreateVector(&paddingOperator, 1));
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000136
137 flatbuffers::Offset <flatbuffers::String> modelDescription =
138 flatBufferBuilder.CreateString("ArmnnDelegate: Pad Operator Model");
139 flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
140 padOperatorCode);
141
142 flatbuffers::Offset <Model> flatbufferModel =
143 CreateModel(flatBufferBuilder,
144 TFLITE_SCHEMA_VERSION,
145 flatBufferBuilder.CreateVector(&operatorCode, 1),
146 flatBufferBuilder.CreateVector(&subgraph, 1),
147 modelDescription,
148 flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
149
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100150 flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000151
152 return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
153 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
154}
155
156template <typename T>
157void PadTest(tflite::BuiltinOperator padOperatorCode,
158 tflite::TensorType tensorType,
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000159 const std::vector<int32_t>& inputShape,
160 const std::vector<int32_t>& paddingShape,
161 std::vector<int32_t>& outputShape,
162 std::vector<T>& inputValues,
163 std::vector<int32_t>& paddingDim,
164 std::vector<T>& expectedOutputValues,
165 T paddingValue,
Colm Donelan7bcae3c2024-01-22 10:07:14 +0000166 const std::vector<armnn::BackendId>& backends = {},
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000167 float quantScale = 1.0f,
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +0100168 int quantOffset = 0,
169 tflite::MirrorPadMode paddingMode = tflite::MirrorPadMode_SYMMETRIC)
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000170{
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100171 using namespace delegateTestInterpreter;
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000172 std::vector<char> modelBuffer = CreatePadTfLiteModel<T>(padOperatorCode,
173 tensorType,
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +0100174 paddingMode,
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000175 inputShape,
176 paddingShape,
177 outputShape,
178 paddingDim,
179 {paddingValue},
180 quantScale,
181 quantOffset);
182
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100183 // Setup interpreter with just TFLite Runtime.
184 auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
185 CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
186 CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
187 CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
188 std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
189 std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000190
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100191 // Setup interpreter with Arm NN Delegate applied.
Colm Donelan7bcae3c2024-01-22 10:07:14 +0000192 auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100193 CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
194 CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
195 CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
196 std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
197 std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000198
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100199 armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
200 armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000201
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100202 tfLiteInterpreter.Cleanup();
203 armnnInterpreter.Cleanup();
Narumol Prangnawarat958024b2020-12-17 12:17:58 +0000204}
205
Keith Davisbbc876c2021-01-27 13:12:03 +0000206} // anonymous namespace