//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCode,
                                         tflite::TensorType tensorType,
                                         const std::vector<int32_t>& inputShape,
                                         const std::vector<int32_t>& alphaShape,
                                         const std::vector<int32_t>& outputShape,
                                         std::vector<float>& alphaData,
                                         bool alphaIsConstant)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // Buffer 0 is the empty buffer that the TfLite schema reserves; buffer 1 holds the alpha data.
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

    buffers.push_back(CreateBuffer(flatBufferBuilder,
                                   flatBufferBuilder.CreateVector(
                                       reinterpret_cast<const uint8_t*>(alphaData.data()),
                                       sizeof(float) * alphaData.size())));

    // Identity quantization parameters (scale 1.0, zero point 0), shared by all three tensors.
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ 1.0f }),
                                     flatBufferBuilder.CreateVector<int64_t>({ 0 }));

    auto inputTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
                                                                            inputShape.size()),
                                    tensorType,
                                    0,
                                    flatBufferBuilder.CreateString("input"),
                                    quantizationParameters);

    // The alpha tensor is backed by buffer 1, which was filled with alphaData above.
    auto alphaTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(alphaShape.data(),
                                                                            alphaShape.size()),
                                    tensorType,
                                    1,
                                    flatBufferBuilder.CreateString("alpha"),
                                    quantizationParameters);

    auto outputTensor = CreateTensor(flatBufferBuilder,
                                     flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
                                                                             outputShape.size()),
                                     tensorType,
                                     0,
                                     flatBufferBuilder.CreateString("output"),
                                     quantizationParameters);

    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, alphaTensor, outputTensor };

    const std::vector<int> operatorInputs{0, 1};
    const std::vector<int> operatorOutputs{2};
    flatbuffers::Offset<Operator> preluOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));

    // A constant alpha stays an operator input only; a non-constant alpha is also listed as a
    // subgraph input so the caller can feed it at runtime.
    std::vector<int> subgraphInputs{0};
    if (!alphaIsConstant)
    {
        subgraphInputs.push_back(1);
    }

    const std::vector<int> subgraphOutputs{2};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&preluOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Prelu Operator Model");
    flatbuffers::Offset<OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder, preluOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&opCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

void PreluTest(tflite::BuiltinOperator preluOperatorCode,
               tflite::TensorType tensorType,
               const std::vector<armnn::BackendId>& backends,
               const std::vector<int32_t>& inputShape,
               const std::vector<int32_t>& alphaShape,
               std::vector<int32_t>& outputShape,
               std::vector<float>& inputData,
               std::vector<float>& alphaData,
               std::vector<float>& expectedOutput,
               bool alphaIsConstant)
{
    using namespace tflite;

    std::vector<char> modelBuffer = CreatePreluTfLiteModel(preluOperatorCode,
                                                           tensorType,
                                                           inputShape,
                                                           alphaShape,
                                                           outputShape,
                                                           alphaData,
                                                           alphaIsConstant);
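
    // Sanity-check sketch: validate the serialised buffer with the schema-generated FlatBuffers
    // verifier before treating it as a tflite::Model. This is an extra guard, assuming the
    // shapes and data passed in form a well-formed model.
    flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t*>(modelBuffer.data()),
                                   modelBuffer.size());
    CHECK(tflite::VerifyModelBuffer(verifier));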

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    CHECK(tfLiteModel != nullptr);

    // Build the interpreter that will be modified to run through the Arm NN delegate.
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;

    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    // Build a reference interpreter that runs on the default TfLite CPU kernels.
    std::unique_ptr<Interpreter> tfLiteInterpreter;

    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);

    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

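    // If the delegate claimed the whole graph (an assumption; partial delegation is also
    // possible), the execution plan collapses to a single delegate node, which could be
    // checked with a sketch like:
    //     CHECK(armnnDelegateInterpreter->execution_plan().size() == 1);
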
    // Set input data
    armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputData);
    armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputData);

    // Set alpha data if not constant
    if (!alphaIsConstant)
    {
        armnnDelegate::FillInput<float>(tfLiteInterpreter, 1, alphaData);
        armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 1, alphaData);
    }

    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);

    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);

    // Check both runs against the expected values, and against each other.
    for (size_t i = 0; i < expectedOutput.size(); i++)
    {
        CHECK(expectedOutput[i] == armnnDelegateOutputData[i]);
        CHECK(tfLiteDelegateOutputData[i] == expectedOutput[i]);
        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
    }
}

} // anonymous namespace
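
// Example usage, as a minimal sketch only: the test name, backend, shapes and data values
// below are illustrative assumptions, not fixtures from this file. PReLU computes
// f(x) = x for x >= 0 and f(x) = alpha * x for x < 0, with alpha broadcast to the input.
//
// TEST_CASE("Prelu_Fp32_ConstantAlpha_CpuRef")
// {
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//
//     std::vector<int32_t> inputShape  { 1, 2, 2, 1 };
//     std::vector<int32_t> alphaShape  { 1 };
//     std::vector<int32_t> outputShape { 1, 2, 2, 1 };
//
//     std::vector<float> inputData      { -14.f, 2.f, 0.f, 1.f };
//     std::vector<float> alphaData      { 0.5f };
//     std::vector<float> expectedOutput {  -7.f, 2.f, 0.f, 1.f };
//
//     PreluTest(tflite::BuiltinOperator_PRELU,
//               ::tflite::TensorType_FLOAT32,
//               backends,
//               inputShape,
//               alphaShape,
//               outputShape,
//               inputData,
//               alphaData,
//               expectedOutput,
//               true); // alpha is a constant tensor baked into the model
// }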