//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>
namespace
{
23std::vector<char> CreateSpaceDepthTfLiteModel(tflite::BuiltinOperator spaceDepthOperatorCode,
24 tflite::TensorType tensorType,
25 const std::vector <int32_t>& inputTensorShape,
26 const std::vector <int32_t>& outputTensorShape,
27 int32_t blockSize)
28{
29 using namespace tflite;
30 flatbuffers::FlatBufferBuilder flatBufferBuilder;
31
32 auto quantizationParameters =
33 CreateQuantizationParameters(flatBufferBuilder,
34 0,
35 0,
36 flatBufferBuilder.CreateVector<float>({ 1.0f }),
37 flatBufferBuilder.CreateVector<int64_t>({ 0 }));
38
39 std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
40 buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
41
42 std::array<flatbuffers::Offset<Tensor>, 2> tensors;
43 tensors[0] = CreateTensor(flatBufferBuilder,
44 flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
45 inputTensorShape.size()),
46 tensorType,
47 0,
48 flatBufferBuilder.CreateString("input"),
49 quantizationParameters);
50 tensors[1] = CreateTensor(flatBufferBuilder,
51 flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
52 outputTensorShape.size()),
53 tensorType,
54 0,
55 flatBufferBuilder.CreateString("output"),
56 quantizationParameters);
57
58 const std::vector<int32_t> operatorInputs({0});
59 const std::vector<int32_t> operatorOutputs({1});
60
61 flatbuffers::Offset<Operator> spaceDepthOperator;
62 flatbuffers::Offset<flatbuffers::String> modelDescription;
63 flatbuffers::Offset<OperatorCode> operatorCode;
64
65 switch (spaceDepthOperatorCode)
66 {
67 case tflite::BuiltinOperator_SPACE_TO_DEPTH:
68 spaceDepthOperator =
69 CreateOperator(flatBufferBuilder,
70 0,
71 flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
72 flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
73 BuiltinOptions_SpaceToDepthOptions,
74 CreateSpaceToDepthOptions(flatBufferBuilder, blockSize).Union());
75 modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: SPACE_TO_DEPTH Operator Model");
76 operatorCode = CreateOperatorCode(flatBufferBuilder,
77 tflite::BuiltinOperator_SPACE_TO_DEPTH);
78 break;
79 case tflite::BuiltinOperator_DEPTH_TO_SPACE:
80 spaceDepthOperator =
81 CreateOperator(flatBufferBuilder,
82 0,
83 flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
84 flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
85 BuiltinOptions_DepthToSpaceOptions,
86 CreateDepthToSpaceOptions(flatBufferBuilder, blockSize).Union());
87 flatBufferBuilder.CreateString("ArmnnDelegate: DEPTH_TO_SPACE Operator Model");
88 operatorCode = CreateOperatorCode(flatBufferBuilder,
89 tflite::BuiltinOperator_DEPTH_TO_SPACE);
90 break;
91 default:
92 break;
93 }
94 const std::vector<int32_t> subgraphInputs({0});
95 const std::vector<int32_t> subgraphOutputs({1});
96 flatbuffers::Offset<SubGraph> subgraph =
97 CreateSubGraph(flatBufferBuilder,
98 flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
99 flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
100 flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
101 flatBufferBuilder.CreateVector(&spaceDepthOperator, 1));
102 flatbuffers::Offset<Model> flatbufferModel =
103 CreateModel(flatBufferBuilder,
104 TFLITE_SCHEMA_VERSION,
105 flatBufferBuilder.CreateVector(&operatorCode, 1),
106 flatBufferBuilder.CreateVector(&subgraph, 1),
107 modelDescription,
108 flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
109 flatBufferBuilder.Finish(flatbufferModel);
110 return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
111 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
112}
113
114template <typename T>
115void SpaceDepthTest(tflite::BuiltinOperator spaceDepthOperatorCode,
116 tflite::TensorType tensorType,
117 std::vector<armnn::BackendId>& backends,
118 std::vector<int32_t>& inputShape,
119 std::vector<int32_t>& outputShape,
120 std::vector<T>& inputValues,
121 std::vector<T>& expectedOutputValues,
122 int32_t blockSize = 2)
123{
124 using namespace tflite;
125 std::vector<char> modelBuffer = CreateSpaceDepthTfLiteModel(spaceDepthOperatorCode,
126 tensorType,
127 inputShape,
128 outputShape,
129 blockSize);
130
131 const Model* tfLiteModel = GetModel(modelBuffer.data());
132 // Create TfLite Interpreters
133 std::unique_ptr<Interpreter> armnnDelegateInterpreter;
134 CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
135 (&armnnDelegateInterpreter) == kTfLiteOk);
136 CHECK(armnnDelegateInterpreter != nullptr);
137 CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
138
139 std::unique_ptr<Interpreter> tfLiteInterpreter;
140 CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
141 (&tfLiteInterpreter) == kTfLiteOk);
142 CHECK(tfLiteInterpreter != nullptr);
143 CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
144
145 // Create the ArmNN Delegate
146 armnnDelegate::DelegateOptions delegateOptions(backends);
147 std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
148 theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
149 armnnDelegate::TfLiteArmnnDelegateDelete);
150 CHECK(theArmnnDelegate != nullptr);
151 // Modify armnnDelegateInterpreter to use armnnDelegate
152 CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
153
154 // Set input data
155 armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
156 armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
157
158 // Run EnqueWorkload
159 CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
160 CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
161
162 // Compare output data
163 armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
164}
165
} // anonymous namespace