//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{
std::vector<char> CreateCastTfLiteModel(tflite::TensorType inputTensorType,
                                        tflite::TensorType outputTensorType,
                                        const std::vector<int32_t>& tensorShape,
                                        float quantScale = 1.0f,
                                        int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

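    // Buffer 0 is the TfLite schema convention for "no data"; both tensors
    // below reference it, since neither carries constant data.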
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({quantScale}),
                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));

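    // Input (index 0) and output (index 1) share the same shape and
    // quantization parameters; only the element type differs.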
    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              inputTensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              outputTensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

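    // A single CAST operator maps tensor 0 (input) to tensor 1 (output).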
    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> castOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       BuiltinOptions_CastOptions,
                       CreateCastOptions(flatBufferBuilder).Union());

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: CAST Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_CAST);

    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&castOperator, 1));

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

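    // Serialise the model and copy it out of the builder's internal buffer.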
    flatBufferBuilder.Finish(flatbufferModel);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

template<typename T, typename K>
void CastTest(tflite::TensorType inputTensorType,
              tflite::TensorType outputTensorType,
              std::vector<armnn::BackendId>& backends,
              std::vector<int32_t>& shape,
              std::vector<T>& inputValues,
              std::vector<K>& expectedOutputValues,
              float quantScale = 1.0f,
              int quantOffset = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateCastTfLiteModel(inputTensorType,
                                                          outputTensorType,
                                                          shape,
                                                          quantScale,
                                                          quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create two TfLite Interpreters: one will be given the ArmNN delegate,
    // the other stays on the reference TfLite kernels for comparison
    std::unique_ptr<Interpreter> armnnDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegate) == kTfLiteOk);
    CHECK(armnnDelegate != nullptr);
    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteDelegate;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteDelegate) == kTfLiteOk);
    CHECK(tfLiteDelegate != nullptr);
    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify the armnnDelegate interpreter to use theArmnnDelegate
    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);

    // Run EnqueueWorkload
    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
    CHECK(armnnDelegate->Invoke() == kTfLiteOk);

    // Compare output data
    armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
                                        armnnDelegate,
                                        shape,
                                        expectedOutputValues,
                                        0);

    tfLiteDelegate.reset(nullptr);
    armnnDelegate.reset(nullptr);
}

} // anonymous namespace
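
// A minimal usage sketch (not part of this header): how a doctest case might
// drive CastTest. The shape, values, test name and the CpuRef backend choice
// are illustrative assumptions, not taken from the code above. It is kept
// commented out so that including this helper does not register an extra test.
//
// TEST_CASE("CAST_UINT8_TO_FP32_CpuRef_Test")
// {
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> shape { 1, 2, 2, 1 };
//     std::vector<uint8_t> inputValues { 1, 2, 3, 4 };
//     std::vector<float> expectedOutputValues { 1.0f, 2.0f, 3.0f, 4.0f };
//
//     // Builds the single-operator CAST model, runs it on both interpreters
//     // and checks the delegate output against the reference kernels.
//     CastTest<uint8_t, float>(tflite::TensorType_UINT8,
//                              tflite::TensorType_FLOAT32,
//                              backends,
//                              shape,
//                              inputValues,
//                              expectedOutputValues);
// }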