blob: 3059f3b7a2e19ac06eed6a5fe759e0f5ddc34ab1 [file] [log] [blame]
//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

// Builds and serialises a single-operator TFLite FlatBuffer model containing
// one Pooling2d operator (e.g. AVERAGE_POOL_2D / MAX_POOL_2D, selected by
// poolingOperatorCode) with one input and one output tensor.
//
// poolingOperatorCode     - which pooling builtin operator to emit.
// tensorType              - element type used for both input and output tensors.
// inputTensorShape /
// outputTensorShape       - shapes of the two tensors.
// padding                 - TFLite padding scheme for the Pool2DOptions.
// strideWidth/Height,
// filterWidth/Height      - remaining Pool2DOptions fields (default 0).
// fusedActivation         - activation fused into the pooling operator.
// quantScale/quantOffset  - quantization parameters shared by both tensors.
//
// Returns the finished FlatBuffer as raw bytes, ready to be loaded by a
// TFLite interpreter.
std::vector<char> CreatePooling2dTfLiteModel(
    tflite::BuiltinOperator poolingOperatorCode,
    tflite::TensorType tensorType,
    const std::vector <int32_t>& inputTensorShape,
    const std::vector <int32_t>& outputTensorShape,
    tflite::Padding padding = tflite::Padding_SAME,
    int32_t strideWidth = 0,
    int32_t strideHeight = 0,
    int32_t filterWidth = 0,
    int32_t filterHeight = 0,
    tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
    float quantScale = 1.0f,
    int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // Buffer 0 is the conventional empty buffer; buffers 1 and 2 back the
    // input and output tensors respectively (referenced by index below).
    flatbuffers::Offset<tflite::Buffer> buffers[3] = {CreateBuffer(flatBufferBuilder),
                                                      CreateBuffer(flatBufferBuilder),
                                                      CreateBuffer(flatBufferBuilder)};

    // Single scale/zero-point pair applied to both tensors.
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    // Input tensor uses buffer index 1, output tensor buffer index 2.
    flatbuffers::Offset<Tensor> tensors[2] {
        CreateTensor(flatBufferBuilder,
                     flatBufferBuilder.CreateVector<int32_t>(inputTensorShape),
                     tensorType,
                     1,
                     flatBufferBuilder.CreateString("input"),
                     quantizationParameters),

        CreateTensor(flatBufferBuilder,
                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape),
                     tensorType,
                     2,
                     flatBufferBuilder.CreateString("output"),
                     quantizationParameters)
    };

    // create operator
    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_Pool2DOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreatePool2DOptions(flatBufferBuilder,
                                                                           padding,
                                                                           strideWidth,
                                                                           strideHeight,
                                                                           filterWidth,
                                                                           filterHeight,
                                                                           fusedActivation).Union();

    // The operator consumes tensor 0 and produces tensor 1 (indices into the
    // subgraph tensor table above).
    const std::vector<int32_t> operatorInputs{0};
    const std::vector<int32_t> operatorOutputs{1};
    flatbuffers::Offset <Operator> poolingOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    // The subgraph exposes tensor 0 as its input and tensor 1 as its output.
    const int subgraphInputs[1] = {0};
    const int subgraphOutputs[1] = {1};
    flatbuffers::Offset <SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors, 2),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs, 1),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs, 1),
                       flatBufferBuilder.CreateVector(&poolingOperator, 1));

    flatbuffers::Offset <flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Pooling2d Operator Model");
    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, poolingOperatorCode);

    flatbuffers::Offset <Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers, 3));

    // Finish with the TFLite file identifier so the buffer is recognised as a
    // valid .tflite model.
    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    // Copy the finished buffer out of the builder's storage.
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
112
113template <typename T>
114void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode,
115 tflite::TensorType tensorType,
116 std::vector<armnn::BackendId>& backends,
117 std::vector<int32_t>& inputShape,
118 std::vector<int32_t>& outputShape,
119 std::vector<T>& inputValues,
120 std::vector<T>& expectedOutputValues,
121 tflite::Padding padding = tflite::Padding_SAME,
122 int32_t strideWidth = 0,
123 int32_t strideHeight = 0,
124 int32_t filterWidth = 0,
125 int32_t filterHeight = 0,
126 tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
127 float quantScale = 1.0f,
128 int quantOffset = 0)
129{
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100130 using namespace delegateTestInterpreter;
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000131 std::vector<char> modelBuffer = CreatePooling2dTfLiteModel(poolingOperatorCode,
132 tensorType,
133 inputShape,
134 outputShape,
135 padding,
136 strideWidth,
137 strideHeight,
138 filterWidth,
139 filterHeight,
140 fusedActivation,
141 quantScale,
142 quantOffset);
143
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100144 // Setup interpreter with just TFLite Runtime.
145 auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
146 CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
147 CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
148 CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
149 std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
150 std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000151
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100152 // Setup interpreter with Arm NN Delegate applied.
153 auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
154 CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
155 CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
156 CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
157 std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
158 std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000159
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100160 armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
161 armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000162
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100163 tfLiteInterpreter.Cleanup();
164 armnnInterpreter.Cleanup();
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000165}

} // anonymous namespace