blob: 99bb60b91a541e70d9dfcd2776980a5153fbb0fa [file] [log] [blame]
//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

#include <array>
#include <cstdint>
#include <memory>
#include <vector>
19namespace
20{
21std::vector<char> CreateTransposeTfLiteModel(tflite::TensorType tensorType,
22 const std::vector <int32_t>& input0TensorShape,
23 const std::vector <int32_t>& inputPermVecShape,
24 const std::vector <int32_t>& outputTensorShape,
25 const std::vector<int32_t>& inputPermVec)
26{
27 using namespace tflite;
28 flatbuffers::FlatBufferBuilder flatBufferBuilder;
Ryan OShea238ecd92023-03-07 11:44:23 +000029 flatbuffers::Offset<tflite::Buffer> buffers[4]{
30 CreateBuffer(flatBufferBuilder),
31 CreateBuffer(flatBufferBuilder),
32 CreateBuffer(flatBufferBuilder,
33 flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputPermVec.data()),
34 sizeof(int32_t) * inputPermVec.size())),
35 CreateBuffer(flatBufferBuilder)
36 };
James Wardf89964e2020-11-09 11:57:47 +000037 std::array<flatbuffers::Offset<Tensor>, 3> tensors;
38 tensors[0] = CreateTensor(flatBufferBuilder,
39 flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
40 input0TensorShape.size()),
Ryan OShea238ecd92023-03-07 11:44:23 +000041 tensorType, 1);
James Wardf89964e2020-11-09 11:57:47 +000042 tensors[1] = CreateTensor(flatBufferBuilder,
43 flatBufferBuilder.CreateVector<int32_t>(inputPermVecShape.data(),
44 inputPermVecShape.size()),
Ryan OShea238ecd92023-03-07 11:44:23 +000045 tflite::TensorType_INT32, 2,
James Wardf89964e2020-11-09 11:57:47 +000046 flatBufferBuilder.CreateString("permutation_vector"));
47 tensors[2] = CreateTensor(flatBufferBuilder,
48 flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
49 outputTensorShape.size()),
Ryan OShea238ecd92023-03-07 11:44:23 +000050 tensorType,3);
Keith Davis892fafe2020-11-26 17:40:35 +000051 const std::vector<int32_t> operatorInputs{0, 1};
52 const std::vector<int32_t> operatorOutputs{2};
James Wardf89964e2020-11-09 11:57:47 +000053 flatbuffers::Offset <Operator> transposeOperator =
Ryan OShea238ecd92023-03-07 11:44:23 +000054 CreateOperator(flatBufferBuilder,
55 0,
56 flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
57 flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
58 BuiltinOptions_TransposeOptions,
59 CreateTransposeOptions(flatBufferBuilder).Union());
Keith Davis892fafe2020-11-26 17:40:35 +000060 const std::vector<int> subgraphInputs{0, 1};
61 const std::vector<int> subgraphOutputs{2};
James Wardf89964e2020-11-09 11:57:47 +000062 flatbuffers::Offset <SubGraph> subgraph =
Ryan OShea238ecd92023-03-07 11:44:23 +000063 CreateSubGraph(flatBufferBuilder,
64 flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
65 flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
66 flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
67 flatBufferBuilder.CreateVector(&transposeOperator, 1));
James Wardf89964e2020-11-09 11:57:47 +000068 flatbuffers::Offset <flatbuffers::String> modelDescription =
Ryan OShea238ecd92023-03-07 11:44:23 +000069 flatBufferBuilder.CreateString("ArmnnDelegate: Transpose Operator Model");
James Wardf89964e2020-11-09 11:57:47 +000070 flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
71 tflite::BuiltinOperator_TRANSPOSE);
72 flatbuffers::Offset <Model> flatbufferModel =
Ryan OShea238ecd92023-03-07 11:44:23 +000073 CreateModel(flatBufferBuilder,
74 TFLITE_SCHEMA_VERSION,
75 flatBufferBuilder.CreateVector(&operatorCode, 1),
76 flatBufferBuilder.CreateVector(&subgraph, 1),
77 modelDescription,
78 flatBufferBuilder.CreateVector(buffers, 4));
James Wardf89964e2020-11-09 11:57:47 +000079 flatBufferBuilder.Finish(flatbufferModel);
80 return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
81 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
82}
83
84void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
85{
86 using namespace tflite;
87
88 // set test input data
89 std::vector<int32_t> input0Shape {4, 2, 3};
90 std::vector<int32_t> inputPermVecShape {3};
91 std::vector<int32_t> outputShape {2, 3, 4};
92
93 std::vector<float> input0Values = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
94 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
95 std::vector<int32_t> inputPermVec = {2, 0, 1};
96 std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
97 13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
98
99 // create model
100 std::vector<char> modelBuffer = CreateTransposeTfLiteModel(::tflite::TensorType_FLOAT32,
101 input0Shape,
102 inputPermVecShape,
103 outputShape,
104 inputPermVec);
105
106 const Model* tfLiteModel = GetModel(modelBuffer.data());
107 // Create TfLite Interpreters
108 std::unique_ptr<Interpreter> armnnDelegateInterpreter;
109 CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
Ryan OShea238ecd92023-03-07 11:44:23 +0000110 (&armnnDelegateInterpreter) == kTfLiteOk);
James Wardf89964e2020-11-09 11:57:47 +0000111 CHECK(armnnDelegateInterpreter != nullptr);
112 CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
113
114 std::unique_ptr<Interpreter> tfLiteInterpreter;
115 CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
Ryan OShea238ecd92023-03-07 11:44:23 +0000116 (&tfLiteInterpreter) == kTfLiteOk);
James Wardf89964e2020-11-09 11:57:47 +0000117 CHECK(tfLiteInterpreter != nullptr);
118 CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
119
120 // Create the ArmNN Delegate
121 armnnDelegate::DelegateOptions delegateOptions(backends);
122 std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
Ryan OShea238ecd92023-03-07 11:44:23 +0000123 theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
124 armnnDelegate::TfLiteArmnnDelegateDelete);
James Wardf89964e2020-11-09 11:57:47 +0000125 CHECK(theArmnnDelegate != nullptr);
126 // Modify armnnDelegateInterpreter to use armnnDelegate
127 CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
128
129 // Set input data for tflite
130 auto tfLiteInterpreterInput0Id = tfLiteInterpreter->inputs()[0];
131 auto tfLiteInterpreterInput0Data = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterInput0Id);
132 for (unsigned int i = 0; i < input0Values.size(); ++i)
133 {
134 tfLiteInterpreterInput0Data[i] = input0Values[i];
135 }
136
137 auto tfLiteInterpreterInput1Id = tfLiteInterpreter->inputs()[1];
138 auto tfLiteInterpreterInput1Data = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteInterpreterInput1Id);
139 for (unsigned int i = 0; i < inputPermVec.size(); ++i)
140 {
141 tfLiteInterpreterInput1Data[i] = inputPermVec[i];
142 }
143
144 //Set input data for armnn delegate
145 auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
146 auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput0Id);
147 for (unsigned int i = 0; i < input0Values.size(); ++i)
148 {
149 armnnDelegateInput0Data[i] = input0Values[i];
150 }
151
152 auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
153 auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<int32_t>(armnnDelegateInput1Id);
154 for (unsigned int i = 0; i < inputPermVec.size(); ++i)
155 {
156 armnnDelegateInput1Data[i] = inputPermVec[i];
157 }
158
159 // Run EnqueWorkload
160 CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
161 CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
162
163 // Compare output data
164 auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
165 auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
166 auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
167 auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
168 for (size_t i = 0; i < expectedOutputValues.size(); ++i)
169 {
170 CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
171 CHECK(tfLiteInterpreterOutputData[i] == expectedOutputValues[i]);
172 CHECK(tfLiteInterpreterOutputData[i] == armnnDelegateOutputData[i]);
173 }
174
175 armnnDelegateInterpreter.reset(nullptr);
176}
177}