//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

std::vector<char> CreateGatherNdTfLiteModel(tflite::TensorType tensorType,
                                            std::vector<int32_t>& paramsShape,
                                            std::vector<int32_t>& indicesShape,
                                            const std::vector<int32_t>& expectedOutputShape,
                                            float quantScale = 1.0f,
                                            int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

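    // Buffer 0 is the empty sentinel buffer required by the TfLite schema; the tensors below
    // reference buffers 1-3, which are also left empty as the model embeds no constant data.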
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

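    // A single quantization description is shared by all three tensors; the default scale of
    // 1.0f and zero point of 0 apply when the test is not exercising a quantized data type.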
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({quantScale}),
                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));

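    // Three tensors: params and output use the requested tensorType, while GATHER_ND indices
    // are always INT32. The integer argument after the type is the index into 'buffers'.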
    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
                                                                      paramsShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("params"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
                                                                      indicesShape.size()),
                              ::tflite::TensorType_INT32,
                              2,
                              flatBufferBuilder.CreateString("indices"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
                                                                      expectedOutputShape.size()),
                              tensorType,
                              3,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // create operator
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_GatherNdOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateGatherNdOptions(flatBufferBuilder).Union();

    const std::vector<int> operatorInputs{{0, 1}};
    const std::vector<int> operatorOutputs{2};
    flatbuffers::Offset<Operator> controlOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
                                                               operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
                                                               operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

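    // A single subgraph: the params and indices tensors are the graph inputs and the output
    // tensor is the graph output.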
    const std::vector<int> subgraphInputs{{0, 1}};
    const std::vector<int> subgraphOutputs{2};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
                                                               subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
                                                               subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: GATHER_ND Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                        BuiltinOperator_GATHER_ND);

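    // Assemble the model from the schema version, operator code, subgraph, description and
    // buffers, then serialise it into a byte vector.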
    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

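// Builds the GATHER_ND model above, runs it both on the plain TfLite runtime and through the
// Arm NN delegate, and checks that both executions produce the expected output values and shape.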
template<typename T>
void GatherNdTest(tflite::TensorType tensorType,
                  std::vector<armnn::BackendId>& backends,
                  std::vector<int32_t>& paramsShape,
                  std::vector<int32_t>& indicesShape,
                  std::vector<int32_t>& expectedOutputShape,
                  std::vector<T>& paramsValues,
                  std::vector<int32_t>& indicesValues,
                  std::vector<T>& expectedOutputValues,
                  float quantScale = 1.0f,
                  int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateGatherNdTfLiteModel(tensorType,
                                                              paramsShape,
                                                              indicesShape,
                                                              expectedOutputShape,
                                                              quantScale,
                                                              quantOffset);
    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Setup interpreter with Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
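
// For reference, a minimal usage sketch of GatherNdTest (the shapes and values here are
// illustrative assumptions, not taken from the actual GatherNd test cases):
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> paramsShape         { 2, 2 };
//     std::vector<int32_t> indicesShape        { 2, 1 };
//     std::vector<int32_t> expectedOutputShape { 2, 2 };
//     std::vector<float>   paramsValues        { 1.f, 2.f, 3.f, 4.f };
//     std::vector<int32_t> indicesValues       { 1, 0 };
//     std::vector<float>   expectedOutputValues{ 3.f, 4.f, 1.f, 2.f };   // rows 1 and 0 of params
//     GatherNdTest<float>(tflite::TensorType_FLOAT32, backends, paramsShape, indicesShape,
//                         expectedOutputShape, paramsValues, indicesValues, expectedOutputValues);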
} // anonymous namespace