//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{
std::vector<char> CreateRoundTfLiteModel(tflite::BuiltinOperator roundOperatorCode,
                                         tflite::TensorType tensorType,
                                         const std::vector<int32_t>& tensorShape,
                                         float quantScale = 1.0f,
                                         int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // Buffer 0 is the empty sentinel buffer required by the TfLite schema;
    // both tensors reference it since neither carries constant data.
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

    // Per-tensor quantization parameters, shared by the input and output
    // tensors; the defaults (scale 1.0, offset 0) leave float data unaffected.
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({quantScale}),
                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));

    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> roundOperator;
    flatbuffers::Offset<flatbuffers::String> modelDescription;
    flatbuffers::Offset<OperatorCode> operatorCode;

    // Only FLOOR is handled at present; it also doubles as the default case.
    switch (roundOperatorCode)
    {
        case tflite::BuiltinOperator_FLOOR:
        default:
            roundOperator =
                CreateOperator(flatBufferBuilder,
                               0,
                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Floor Operator Model");
            operatorCode = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_FLOOR);
            break;
    }
    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&roundOperator, 1));

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
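
// Hypothetical helper, not part of the original file: a minimal sketch showing
// how a buffer produced by CreateRoundTfLiteModel could be sanity-checked with
// the flatbuffers verifier from the generated schema header included above.
inline bool IsValidTfLiteModelBuffer(const std::vector<char>& modelBuffer)
{
    flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t*>(modelBuffer.data()),
                                   modelBuffer.size());
    return tflite::VerifyModelBuffer(verifier);
}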

template<typename T>
void RoundTest(tflite::BuiltinOperator roundOperatorCode,
               tflite::TensorType tensorType,
               std::vector<armnn::BackendId>& backends,
               std::vector<int32_t>& shape,
               std::vector<T>& inputValues,
               std::vector<T>& expectedOutputValues,
               float quantScale = 1.0f,
               int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateRoundTfLiteModel(roundOperatorCode,
                                                           tensorType,
                                                           shape,
                                                           quantScale,
                                                           quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create the TfLite interpreters: one to run with the ArmNN delegate
    // attached and one to run with the reference TfLite kernels.
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN delegate; the unique_ptr's custom deleter releases it
    // via TfLiteArmnnDelegateDelete.
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use the ArmNN delegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data on both interpreters
    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);

    // Run inference
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
                                        armnnDelegateInterpreter,
                                        shape,
                                        expectedOutputValues,
                                        0);

    tfLiteInterpreter.reset(nullptr);
    armnnDelegateInterpreter.reset(nullptr);
}

} // anonymous namespace
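
// A minimal usage sketch, following the pattern of the delegate's other
// operator tests; the backend choice and the data values below are
// illustrative only and not part of this file:
//
//     TEST_CASE("Floor_Fp32_Test")
//     {
//         std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//         std::vector<int32_t> shape { 1, 3, 1 };
//         std::vector<float> inputValues          { -1.5f, 0.0f, 1.5f };
//         std::vector<float> expectedOutputValues { -2.0f, 0.0f, 1.0f };
//
//         RoundTest<float>(tflite::BuiltinOperator_FLOOR,
//                          ::tflite::TensorType_FLOAT32,
//                          backends,
//                          shape,
//                          inputValues,
//                          expectedOutputValues);
//     }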