//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"

#include <armnn/LayerVisitorBase.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <layers/StandInLayer.hpp>

#include <boost/test/unit_test.hpp>

#include <sstream>
#include <string>
#include <vector>
21
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)

using namespace armnn;

26class StandInLayerVerifier : public LayerVisitorBase<VisitorThrowingPolicy>
27{
28public:
29 StandInLayerVerifier(const std::vector<TensorInfo>& inputInfos,
30 const std::vector<TensorInfo>& outputInfos)
31 : LayerVisitorBase<VisitorThrowingPolicy>()
32 , m_InputInfos(inputInfos)
33 , m_OutputInfos(outputInfos) {}
34
35 void VisitInputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
36
Derek Lambertibaa177f2019-12-10 22:00:43 +000037 void VisitOutputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
Aron Virginas-Tarc975f922019-10-23 17:38:17 +010038
39 void VisitStandInLayer(const IConnectableLayer* layer,
40 const StandInDescriptor& descriptor,
41 const char*) override
42 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +010043 unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +010044 BOOST_CHECK(descriptor.m_NumInputs == numInputs);
45 BOOST_CHECK(layer->GetNumInputSlots() == numInputs);
46
Matthew Sloyan589e3e82020-09-11 16:17:48 +010047 unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +010048 BOOST_CHECK(descriptor.m_NumOutputs == numOutputs);
49 BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs);
50
Jan Eilersbb446e52020-04-02 13:56:54 +010051 const StandInLayer* standInLayer = PolymorphicDowncast<const StandInLayer*>(layer);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +010052 for (unsigned int i = 0u; i < numInputs; ++i)
53 {
54 const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
55 BOOST_CHECK(connectedSlot != nullptr);
56
57 const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
58 BOOST_CHECK(inputInfo == m_InputInfos[i]);
59 }
60
61 for (unsigned int i = 0u; i < numOutputs; ++i)
62 {
63 const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
64 BOOST_CHECK(outputInfo == m_OutputInfos[i]);
65 }
66 }
67
68private:
69 std::vector<TensorInfo> m_InputInfos;
70 std::vector<TensorInfo> m_OutputInfos;
71};
72
73class DummyCustomFixture : public ParserFlatbuffersFixture
74{
75public:
76 explicit DummyCustomFixture(const std::vector<TensorInfo>& inputInfos,
77 const std::vector<TensorInfo>& outputInfos)
78 : ParserFlatbuffersFixture()
79 , m_StandInLayerVerifier(inputInfos, outputInfos)
80 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +010081 const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputInfos.size());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010082 ARMNN_ASSERT(numInputs > 0);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +010083
Matthew Sloyan589e3e82020-09-11 16:17:48 +010084 const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputInfos.size());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010085 ARMNN_ASSERT(numOutputs > 0);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +010086
87 m_JsonString = R"(
88 {
89 "version": 3,
90 "operator_codes": [{
91 "builtin_code": "CUSTOM",
92 "custom_code": "DummyCustomOperator"
93 }],
94 "subgraphs": [ {
95 "tensors": [)";
96
97 // Add input tensors
98 for (unsigned int i = 0u; i < numInputs; ++i)
99 {
100 const TensorInfo& inputInfo = inputInfos[i];
101 m_JsonString += R"(
102 {
103 "shape": )" + GetTensorShapeAsString(inputInfo.GetShape()) + R"(,
104 "type": )" + GetDataTypeAsString(inputInfo.GetDataType()) + R"(,
105 "buffer": 0,
106 "name": "inputTensor)" + std::to_string(i) + R"(",
107 "quantization": {
108 "min": [ 0.0 ],
109 "max": [ 255.0 ],
110 "scale": [ )" + std::to_string(inputInfo.GetQuantizationScale()) + R"( ],
111 "zero_point": [ )" + std::to_string(inputInfo.GetQuantizationOffset()) + R"( ],
112 }
113 },)";
114 }
115
116 // Add output tensors
117 for (unsigned int i = 0u; i < numOutputs; ++i)
118 {
119 const TensorInfo& outputInfo = outputInfos[i];
120 m_JsonString += R"(
121 {
122 "shape": )" + GetTensorShapeAsString(outputInfo.GetShape()) + R"(,
123 "type": )" + GetDataTypeAsString(outputInfo.GetDataType()) + R"(,
124 "buffer": 0,
125 "name": "outputTensor)" + std::to_string(i) + R"(",
126 "quantization": {
127 "min": [ 0.0 ],
128 "max": [ 255.0 ],
129 "scale": [ )" + std::to_string(outputInfo.GetQuantizationScale()) + R"( ],
130 "zero_point": [ )" + std::to_string(outputInfo.GetQuantizationOffset()) + R"( ],
131 }
132 })";
133
134 if (i + 1 < numOutputs)
135 {
136 m_JsonString += ",";
137 }
138 }
139
140 const std::string inputIndices = GetIndicesAsString(0u, numInputs - 1u);
141 const std::string outputIndices = GetIndicesAsString(numInputs, numInputs + numOutputs - 1u);
142
143 // Add dummy custom operator
144 m_JsonString += R"(],
145 "inputs": )" + inputIndices + R"(,
146 "outputs": )" + outputIndices + R"(,
147 "operators": [
148 {
149 "opcode_index": 0,
150 "inputs": )" + inputIndices + R"(,
151 "outputs": )" + outputIndices + R"(,
152 "builtin_options_type": 0,
153 "custom_options": [ ],
154 "custom_options_format": "FLEXBUFFERS"
155 }
156 ],
157 } ],
158 "buffers" : [
159 { },
160 { }
161 ]
162 }
163 )";
164
165 ReadStringToBinary();
166 }
167
168 void RunTest()
169 {
170 INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
171 network->Accept(m_StandInLayerVerifier);
172 }
173
174private:
175 static std::string GetTensorShapeAsString(const TensorShape& tensorShape)
176 {
177 std::stringstream stream;
178 stream << "[ ";
179 for (unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
180 {
181 stream << tensorShape[i];
182 if (i + 1 < tensorShape.GetNumDimensions())
183 {
184 stream << ",";
185 }
186 stream << " ";
187 }
188 stream << "]";
189
190 return stream.str();
191 }
192
193 static std::string GetDataTypeAsString(DataType dataType)
194 {
195 switch (dataType)
196 {
197 case DataType::Float32: return "FLOAT32";
Derek Lambertif90c56d2020-01-10 17:14:08 +0000198 case DataType::QAsymmU8: return "UINT8";
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100199 default: return "UNKNOWN";
200 }
201 }
202
203 static std::string GetIndicesAsString(unsigned int first, unsigned int last)
204 {
205 std::stringstream stream;
206 stream << "[ ";
207 for (unsigned int i = first; i <= last ; ++i)
208 {
209 stream << i;
210 if (i + 1 <= last)
211 {
212 stream << ",";
213 }
214 stream << " ";
215 }
216 stream << "]";
217
218 return stream.str();
219 }
220
221 StandInLayerVerifier m_StandInLayerVerifier;
222};
223
224class DummyCustom1Input1OutputFixture : public DummyCustomFixture
225{
226public:
227 DummyCustom1Input1OutputFixture()
228 : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32) },
229 { TensorInfo({ 2, 2 }, DataType::Float32) }) {}
230};
231
232class DummyCustom2Inputs1OutputFixture : public DummyCustomFixture
233{
234public:
235 DummyCustom2Inputs1OutputFixture()
236 : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32), TensorInfo({ 2, 2 }, DataType::Float32) },
237 { TensorInfo({ 3, 3 }, DataType::Float32) }) {}
238};
239
240BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator1Input1Output, DummyCustom1Input1OutputFixture)
241{
242 RunTest();
243}
244
245BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator2Inputs1Output, DummyCustom2Inputs1OutputFixture)
246{
247 RunTest();
248}

BOOST_AUTO_TEST_SUITE_END()