blob: 560cdf1779c3c2dd445b19d9506667de8f3058a1 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Bentham39ef3e52020-01-20 10:09:09 +00008#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +01009#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000010#include <armnn/Logging.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010011#include <armnn/TypesUtils.hpp>
12#include <boost/filesystem.hpp>
13
14// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000015#include <armnnUtils/Permute.hpp>
16
Sadik Armagan479045b2018-10-01 11:51:37 +010017#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010018#include <VerificationHelpers.hpp>
19
20// The generated code based on the Tf Lite schema:
21#include <schema_generated.h>
22
Matteo Martincighe011d202019-11-28 11:35:47 +000023#include <flatbuffers/flexbuffers.h>
24
telsoa01c577f2c2018-08-31 09:22:23 +010025#include <boost/core/ignore_unused.hpp>
26#include <boost/assert.hpp>
27#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010028#include <boost/numeric/conversion/cast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010029
30#include <fstream>
31#include <algorithm>
32#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010033#include <numeric>
telsoa01c577f2c2018-08-31 09:22:23 +010034
35using namespace armnn;
36using armnn::CheckLocation;
37namespace armnnTfLiteParser
38{
39namespace
40{
jimfly01c25411c2018-11-14 17:47:22 +000041
telsoa01c577f2c2018-08-31 09:22:23 +010042const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
43
44void CheckSubgraph(const TfLiteParser::ModelPtr & model,
45 size_t subgraphIndex,
46 const CheckLocation & location)
47{
48 if (model.get() == nullptr)
49 {
50 throw ParseException(
51 boost::str(
52 boost::format("%1% was called with invalid (null) model. "
53 "Possible reason is that the model is not yet loaded and Unpack(ed). "
54 "subgraph:%2% at %3%") %
55 location.m_Function %
56 subgraphIndex %
57 location.FileLine()));
58 }
59 else if (subgraphIndex >= model->subgraphs.size())
60 {
61 throw ParseException(
62 boost::str(
63 boost::format("%1% was called with an invalid subgraph index. "
64 "subgraph:%2% at %3%") %
65 location.m_Function %
66 subgraphIndex %
67 location.FileLine()));
68 }
69}
70
71#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
72 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
73
74void CheckModel(const TfLiteParser::ModelPtr & model,
75 size_t subgraphIndex,
76 size_t operatorIndex,
77 const CheckLocation & location)
78{
79 if (model.get() == nullptr)
80 {
81 throw ParseException(
82 boost::str(
83 boost::format("%1% was called with invalid (null) model. "
84 "Possible reason is that the model is not yet loaded and Unpack(ed). "
85 "subgraph:%2% operator:%3% at %4%") %
86 location.m_Function %
87 subgraphIndex %
88 operatorIndex %
89 location.FileLine()));
90 }
91 else if (subgraphIndex >= model->subgraphs.size())
92 {
93 throw ParseException(
94 boost::str(
95 boost::format("%1% was called with an invalid subgraph index. "
96 "subgraph:%2% operator:%3% at %4%") %
97 location.m_Function %
98 subgraphIndex %
99 operatorIndex %
100 location.FileLine()));
101 }
102 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
103 operatorIndex != VIRTUAL_OPERATOR_ID)
104 {
105 throw ParseException(
106 boost::str(
107 boost::format("%1% was called with an invalid operator index. "
108 "subgraph:%2% operator:%3% at %4%") %
109 location.m_Function %
110 subgraphIndex %
111 operatorIndex %
112 location.FileLine()));
113 }
114}
115
116#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
117 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
118
119void CheckTensor(const TfLiteParser::ModelPtr & model,
120 size_t subgraphIndex,
121 size_t tensorIndex,
122 const CheckLocation & location)
123{
124 // not checking model, because I assume CHECK_MODEL already run
125 // and checked that. An assert would do.
126 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
127
128 // also subgraph index should be checked by CHECK_MODEL so
129 // I only add an assert here
130 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
131
132 // the tensor index is the only one to check here
133 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
134 {
135 throw ParseException(
136 boost::str(
137 boost::format("%1% was called with an invalid tensor index. "
138 "subgraph:%2% tensor:%3% at %4%") %
139 location.m_Function %
140 subgraphIndex %
141 tensorIndex %
142 location.FileLine()));
143 }
144}
145
146#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
147 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
148
149void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
150 const CheckLocation & location)
151{
152 if (rawPtr == nullptr)
153 {
154 throw ParseException(
155 boost::str(
156 boost::format("%1% was called with a null tensor pointer. "
157 "at %2%") %
158 location.m_Function %
159 location.FileLine()));
160
161 }
162}
163
164#define CHECK_TENSOR_PTR(TENSOR_PTR) \
165 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
166
167void CheckBuffer(const TfLiteParser::ModelPtr & model,
168 size_t bufferIndex,
169 const CheckLocation & location)
170{
171 if (model.get() == nullptr)
172 {
173 throw ParseException(
174 boost::str(
175 boost::format("%1% was called with invalid (null) model. "
176 "Possible reason is that the model is not yet loaded and Unpack(ed). "
177 "buffer:%2% at %3%") %
178 location.m_Function %
179 bufferIndex %
180 location.FileLine()));
181 }
182 else if (bufferIndex >= model->buffers.size())
183 {
184 throw ParseException(
185 boost::str(
186 boost::format("%1% was called with an invalid buffer index. "
187 "buffer index:%2% at %3%") %
188 location.m_Function %
189 bufferIndex %
190 location.FileLine()));
191 }
192 else if (model->buffers[bufferIndex].get() == nullptr)
193 {
194 throw ParseException(
195 boost::str(
196 boost::format("The buffer #%1% is null. %3%") %
197 bufferIndex %
198 location.AsString()));
199 }
200}
201
202#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
203 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
204
205void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
206 const armnn::TensorInfo & tensorInfo,
207 uint32_t bufferId,
208 const CheckLocation & location)
209{
210 if (bufferPtr == nullptr)
211 {
212 throw ParseException(
213 boost::str(
214 boost::format("BufferPtr is null for buffer:%1%. %2%") %
215 bufferId %
216 location.AsString()));
217 }
218 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
219 tensorInfo.GetNumBytes() > bufferPtr->data.size())
220 {
221 std::stringstream ss;
222 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
223 << "For tensor: " << tensorInfo.GetShape()
224 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
225 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
226 throw ParseException(ss.str());
227 }
228}
229
230#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
231 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
232
233bool IsActivationSupported(tflite::ActivationFunctionType activationType)
234{
235 switch(activationType)
236 {
237 case tflite::ActivationFunctionType_NONE:
238 case tflite::ActivationFunctionType_RELU:
239 case tflite::ActivationFunctionType_RELU6:
240 case tflite::ActivationFunctionType_TANH:
241 {
242 return true;
243 }
244 default:
245 {
246 return false;
247 }
248 }
249}
250
251#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
252 do { \
253 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
254 { \
255 throw ParseException( \
256 boost::str( \
257 boost::format("TfLite parser doesn't suppport fused activation: " \
258 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
259 OPTION->fused_activation_function % \
260 tflite::EnumNameActivationFunctionType(\
261 OPTION->fused_activation_function) % \
262 __func__ % \
263 SUBGRAPH_INDEX % \
264 OPERATOR_INDEX % \
265 CHECK_LOCATION().FileLine())); \
266 } \
267 } while(false)
268
269
270std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
271{
272 std::vector<unsigned int> result;
273 result.reserve(in.size());
274 for (auto & i : in)
275 {
276 result.push_back(CHECKED_NON_NEGATIVE(i));
277 }
278 return result;
279}
280
281void CalcPadding(uint32_t inputSize,
282 uint32_t filterSize,
283 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100284 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100285 uint32_t& paddingFront,
286 uint32_t& paddingBack,
287 tflite::Padding padding)
288{
289 paddingFront = 0;
290 paddingBack = 0;
291 if (padding == tflite::Padding_SAME)
292 {
293 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100294 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
295 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100296 if (temp > inputSize)
297 {
298 paddingFront = (temp - inputSize) / 2;
299 paddingBack = (temp - inputSize) - paddingFront;
300 }
301 }
302}
303
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000304armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
telsoa01c577f2c2018-08-31 09:22:23 +0100305{
306 armnn::DataType type;
307 CHECK_TENSOR_PTR(tensorPtr);
308
309 switch (tensorPtr->type)
310 {
311 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000312 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100313 break;
314 case tflite::TensorType_FLOAT32:
315 type = armnn::DataType::Float32;
316 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000317 case tflite::TensorType_INT8:
Ryan OShea03181ff2020-02-07 17:22:22 +0000318 if (tensorPtr->quantization->zero_point.size() == 1 && tensorPtr->quantization->zero_point[0] != 0)
319 {
320 type = armnn::DataType::QAsymmS8;
321 }
322 else
323 {
324 type = armnn::DataType::QSymmS8;
325 }
Finn Williamsed66d142019-12-06 09:55:55 +0000326 break;
327 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000328 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000329 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100330 case tflite::TensorType_INT32:
331 type = armnn::DataType::Signed32;
332 break;
333
334 default:
335 {
336 CheckLocation location = CHECK_LOCATION();
337 throw ParseException(
338 boost::str(
339 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
340 tensorPtr->type %
341 tflite::EnumNameTensorType(tensorPtr->type) %
342 tensorPtr->name %
343 location.AsString()));
344 }
345 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100346 std::vector<unsigned int> safeShape = shapes;
347 if (safeShape.size() == 0)
348 {
349 safeShape.push_back(1);
350 }
351
Keith Davisd305e1a2020-01-22 11:57:54 +0000352 float quantizationScale = 0.0f;
353 int32_t quantizationOffset = 0;
354
355 if (tensorPtr->quantization.get())
356 {
357 if (tensorPtr->quantization->scale.size() <= 1)
358 {
359 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
360 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
361
362 if (tensorPtr->quantization->scale.size() == 1)
363 {
364 quantizationScale = tensorPtr->quantization->scale[0];
365 }
366 if (tensorPtr->quantization->zero_point.size() == 1)
367 {
368 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000369 // but this is what we support at the moment in ArmNN
Keith Davisd305e1a2020-01-22 11:57:54 +0000370 quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
371 }
372
373 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
374 safeShape.data(),
375 type,
376 quantizationScale,
377 quantizationOffset);
378
379 return result;
380 }
381 else
382 {
383 std::vector<float> quantizationScales;
384 std::vector<int32_t> quantizationOffsets;
385
386 // Scale
387 std::copy(tensorPtr->quantization->scale.begin(),
388 tensorPtr->quantization->scale.end(),
389 std::back_inserter(quantizationScales));
390
391 // QSymm Per-axis
392 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
393 safeShape.data(),
394 type,
395 quantizationScales,
396 boost::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
397
398 return result;
399 }
400 }
401 else
402 {
403 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
404 safeShape.data(),
405 type,
406 quantizationScale,
407 quantizationOffset);
408 return result;
409 }
telsoa01c577f2c2018-08-31 09:22:23 +0100410}
411
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000412armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
413{
414 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
415 return ToTensorInfo(tensorPtr, dimensions);
416}
417
telsoa01c577f2c2018-08-31 09:22:23 +0100418template<typename T>
419std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
420CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
421 TfLiteParser::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000422 armnn::TensorInfo& tensorInfo,
423 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100424{
Derek Lambertibaa177f2019-12-10 22:00:43 +0000425 boost::ignore_unused(tensorPtr);
telsoa01c577f2c2018-08-31 09:22:23 +0100426 BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
427 BOOST_ASSERT_MSG(bufferPtr != nullptr,
428 boost::str(
429 boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
430
431 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000432
433 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
434 {
435 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000436 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
437 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000438 }
439 else
440 {
441 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
442 }
443
telsoa01c577f2c2018-08-31 09:22:23 +0100444 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
445}
446
telsoa01c577f2c2018-08-31 09:22:23 +0100447armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
448{
449 // generate the binding id by shifting the tensor id by 8 bit
450 // and add the subgraph id, which allows 256 subgraphs
451 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
452}
453
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000454bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
455{
456 const unsigned int actualSize = actual.GetNumDimensions();
457 if (actualSize != expected.size())
458 {
459 return false;
460 }
461
462 for (unsigned int i = 0u; i < actualSize; i++)
463 {
464 if (expected[i] < 0 ||
465 actual[i] != static_cast<unsigned int>(expected[i]))
466 {
467 return false;
468 }
469 }
470
471 return true;
472}
473
telsoa01c577f2c2018-08-31 09:22:23 +0100474} // <anonymous>
475
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100476TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
477: m_Options(options)
478, m_Network(nullptr, nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +0100479, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
480{
481 // register supported operators
Sadik Armagan66dedc72019-12-10 16:32:07 +0000482 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000483 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
484 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
485 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
486 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000487 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000488 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
Finn Williamsed66d142019-12-06 09:55:55 +0000489 m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000490 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
491 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
492 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
493 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
494 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000495 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000496 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000497 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
498 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
499 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
500 m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000501 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
502 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
503 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
504 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
505 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000506 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000507 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
508 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000509 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000510 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
511 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
512 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000513 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
514 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
515 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
516 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100517
518 // register supported custom operators
519 m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
telsoa01c577f2c2018-08-31 09:22:23 +0100520}
521
522void TfLiteParser::ResetParser()
523{
524 m_Network = armnn::INetworkPtr(nullptr, nullptr);
525 m_Model = nullptr;
526 m_SubgraphConnections.clear();
527}
528
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200529void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
530 size_t operatorIndex,
531 IConnectableLayer *layer)
532{
533 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
534 BOOST_ASSERT(layer != nullptr);
535
Derek Lambertiff05cc52019-04-26 13:05:17 +0100536 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
537 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200538
539 BOOST_ASSERT(operatorPtr->inputs.size() > 1);
540
541 uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
Derek Lambertiff05cc52019-04-26 13:05:17 +0100542 TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200543 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
Derek Lambertiff05cc52019-04-26 13:05:17 +0100544 TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200545
546 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
547 armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);
548
549 if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
550 {
551 uint32_t id = reshapedInputId;
552 reshapedInputId = inputId;
553 inputId = id;
554
555 reshapedTensorInfo = ToTensorInfo(tensorPtr1);
556 inputTensorInfo = ToTensorInfo(tensorPtr);
557 }
558
559 uint32_t numDimensions = inputTensorInfo.GetNumDimensions();
560
561 std::vector<unsigned> reshapedDim;
562 for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
563 {
564 reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
565 }
566
567 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
568 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
569
570 reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
571
572 std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
573 armnn::ReshapeDescriptor desc;
574 desc.m_TargetShape = reshapedTensorInfo.GetShape();
575 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
576
577 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
578 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
579
580 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
581
582 armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
583 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
584}
585
telsoa01c577f2c2018-08-31 09:22:23 +0100586INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
587{
588 ResetParser();
589 m_Model = LoadModelFromFile(graphFile);
590 return CreateNetworkFromModel();
591}
592
593INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
594{
595 ResetParser();
596 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
597 return CreateNetworkFromModel();
598}
599
600INetworkPtr TfLiteParser::CreateNetworkFromModel()
601{
602 m_Network = INetwork::Create();
603 BOOST_ASSERT(m_Model.get() != nullptr);
604
605 bool failedToCreate = false;
606 std::stringstream errors;
607
608 if (m_Model->subgraphs.size() != 1)
609 {
610 throw ParseException(
611 boost::str(
612 boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
613 m_Model->subgraphs.size() %
614 CHECK_LOCATION().AsString()));
615 }
616
617 size_t subgraphIndex = 0;
Derek Lambertiff05cc52019-04-26 13:05:17 +0100618 for (SubgraphPtr const & subgraph : m_Model->subgraphs)
telsoa01c577f2c2018-08-31 09:22:23 +0100619 {
620 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
621
622 size_t operatorIndex = 0;
623 for (OperatorPtr const & op : subgraph->operators)
624 {
625 try
626 {
telsoa01c577f2c2018-08-31 09:22:23 +0100627 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
628 auto builtinCode = opCodePtr->builtin_code;
629
630 if (builtinCode > tflite::BuiltinOperator_MAX)
631 {
632 throw ParseException(
633 boost::str(
634 boost::format("Operator code %1% is out of range 0-%2%. "
635 "subgraph:%3% operator idx:%4%. %5%") %
636 builtinCode %
637 tflite::BuiltinOperator_MAX %
638 subgraphIndex %
639 operatorIndex %
640 CHECK_LOCATION().AsString()));
641 }
642
643 // lookup and call the parser function
644 auto & parserFunction = m_ParserFunctions[builtinCode];
645 (this->*parserFunction)(subgraphIndex, operatorIndex);
646 }
647 catch (const ParseException& e)
648 {
649 failedToCreate = true;
650 std::stringstream errorString;
651
652 errorString << "Failed to parse operator #" << operatorIndex
653 << " within subgraph #" << subgraphIndex
654 << " error: " << e.what();
Derek Lamberti08446972019-11-26 16:38:31 +0000655 ARMNN_LOG(error) << errorString.str();
telsoa01c577f2c2018-08-31 09:22:23 +0100656
657 errors << errorString.str() << "\n";
658 }
659 ++operatorIndex;
660 }
661
662 SetupInputLayers(subgraphIndex);
663 SetupOutputLayers(subgraphIndex);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -0200664 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100665
666 ++subgraphIndex;
667 }
668
669 if (failedToCreate)
670 {
671 // we can skip everything and let the outer exception handler deal with the error
672 throw ParseException(errors.str());
673 }
674
675 // establish the connections from the layer outputs to the inputs of the subsequent layers
676 for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
677 {
678 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
679 {
680 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
681 {
682 for (size_t inputSlotIdx = 0;
683 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
684 ++inputSlotIdx)
685 {
686 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
687 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
688 }
689 }
690 }
691 }
692
693 return std::move(m_Network);
694}
695
696void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
697 size_t tensorIndex,
698 armnn::IOutputSlot* slot)
699{
700 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
701 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
702 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
703
704 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
705
706 // assuming there is only one producer for that tensor
707 if (tensorSlots.outputSlot != nullptr)
708 {
709 throw ParseException(boost::str(
710 boost::format("Another layer has already registered itself as the producer of "
711 "subgraph:%1% tensor:%2% %3%") %
712 subgraphIndex %
713 tensorIndex %
714 CHECK_LOCATION().AsString()));
715 }
716
717 tensorSlots.outputSlot = slot;
718}
719
720void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
721 size_t tensorIndex,
722 armnn::IInputSlot* slot)
723{
724 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
725 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
726 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
727
728 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
729 tensorSlots.inputSlots.push_back(slot);
730}
731
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100732void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
733{
734 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
735
736 // NOTE: By default we presume the custom operator is not supported
737 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
738
739 // Identify custom code defined for custom operator
740 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
741 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
742
743 // Find parser function that correspondes to custom code (if any)
744 auto iterator = m_CustomParserFunctions.find(customCode);
745 if (iterator != m_CustomParserFunctions.end())
746 {
747 customParserFunction = iterator->second;
748 }
749
750 // Run parser function
751 (this->*customParserFunction)(subgraphIndex, operatorIndex);
752}
753
telsoa01c577f2c2018-08-31 09:22:23 +0100754void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
755{
756 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100757
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100758 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
759
760 auto opcodeIndex = operatorPtr->opcode_index;
761 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
762
763 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
764 {
765 // Do not add StandInLayer, throw ParseException instead
766 throw ParseException(
767 boost::str(
768 boost::format("Operator not supported. "
769 "subgraph:%1% operator:%2% "
770 "opcode_index:%3% opcode:%4% / %5% %6%") %
771 subgraphIndex %
772 operatorIndex %
773 opcodeIndex %
774 opcode %
775 tflite::EnumNameBuiltinOperator(opcode) %
776 CHECK_LOCATION().AsString()));
777 }
778
779 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
780 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
781
782 const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
783 const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());
784
785 StandInDescriptor descriptor(numInputs, numOutputs);
786 auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
787
788 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
789 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
790 for (unsigned int i = 0u; i < numOutputs; ++i)
791 {
792 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
793 }
794
795 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
796 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
797
798 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
799 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +0100800}
801
telsoa01c577f2c2018-08-31 09:22:23 +0100802void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
803{
804 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
805
806 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
807 const auto * options = operatorPtr->builtin_options.AsConv2DOptions();
808
809 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
810
811 Convolution2dDescriptor desc;
812 desc.m_BiasEnabled = false;
813 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
814 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000815 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100816 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
817 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000818
telsoa01c577f2c2018-08-31 09:22:23 +0100819 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
820 CHECK_VALID_SIZE(inputs.size(), 2, 3);
821
822 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
823 CHECK_VALID_SIZE(outputs.size(), 1);
824
825 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
826 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
827
828 // assuming input is NHWC
829 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
830 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
831
832 // assuming the filter is OHWI : Output, H, W, Input
833 // which is essentially the same as NHWC
834 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
835 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
836
Pablo Tellof0bd6832019-04-26 17:58:13 +0100837 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
838 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
839 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
840 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100841
Matteo Martincigh747ef822018-12-18 09:26:39 +0000842 auto filterTensorAndData = CreateConstTensor(inputs[1],
843 filterTensorInfo,
844 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100845 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100846
847 auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
848
849 if (inputs.size() == 3)
850 {
851 desc.m_BiasEnabled = true;
852 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000853 auto biasTensorAndData = CreateConstTensor(inputs[2],
854 biasTensorInfo,
855 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100856 layer = m_Network->AddConvolution2dLayer(desc,
857 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100858 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100859 layerName.c_str());
860 }
861 else
862 {
863 layer = m_Network->AddConvolution2dLayer(desc,
864 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100865 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100866 layerName.c_str());
867 }
868
869 BOOST_ASSERT(layer != nullptr);
870
telsoa01c577f2c2018-08-31 09:22:23 +0100871 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000872 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100873
874 // register the input connection slots for the layer, connections are made after all layers have been created
875 // only the tensors for the inputs are relevant, exclude the const tensors
876 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000877 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100878
jimfly01c25411c2018-11-14 17:47:22 +0000879 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100880 // register the output connection slots for the layer, connections are made after all layers have been created
881 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
882 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
883}
884
885void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
886{
887 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
888
889 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
890 const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
891
892 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
893
894 DepthwiseConvolution2dDescriptor desc;
895 desc.m_BiasEnabled = false;
896 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
897 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000898 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matthew Jacksond6a9dee2019-07-22 13:53:24 +0100899 CHECKED_NON_NEGATIVE(options->depth_multiplier);
telsoa01c577f2c2018-08-31 09:22:23 +0100900
901 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
902 CHECK_VALID_SIZE(inputs.size(), 2, 3);
903 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
904 CHECK_VALID_SIZE(outputs.size(), 1);
Pablo Tellof0bd6832019-04-26 17:58:13 +0100905 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
906 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000907
telsoa01c577f2c2018-08-31 09:22:23 +0100908 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
909 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
910
Matteo Martincigh747ef822018-12-18 09:26:39 +0000911 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +0100912 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
913 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000914
915 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +0100916 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
917 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
918
Matteo Martincigh747ef822018-12-18 09:26:39 +0000919 // Reshape weights as [ H, W, I, M ]
920 filterTensorInfo.SetShape({ filterHeight,
921 filterWidth,
922 inputTensorInfo.GetShape()[3],
923 filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });
924
925 // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
926 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
927
Pablo Tellof0bd6832019-04-26 17:58:13 +0100928 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
929 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
930 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
931 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100932
Matteo Martincigh747ef822018-12-18 09:26:39 +0000933 auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100934 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100935 auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
936
937 if (inputs.size() == 3)
938 {
939 desc.m_BiasEnabled = true;
940 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000941 auto biasTensorAndData = CreateConstTensor(inputs[2],
942 biasTensorInfo,
943 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100944 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
945 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100946 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100947 layerName.c_str());
948 }
949 else
950 {
951 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
952 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100953 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100954 layerName.c_str());
955 }
956 BOOST_ASSERT(layer != nullptr);
957
telsoa01c577f2c2018-08-31 09:22:23 +0100958 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000959 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100960
961 // register the input connection slots for the layer, connections are made after all layers have been created
962 // only the tensors for the inputs are relevant, exclude the const tensors
963 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000964 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100965
jimfly01c25411c2018-11-14 17:47:22 +0000966 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100967 // register the output connection slots for the layer, connections are made after all layers have been created
968 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
969 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
970}
971
Finn Williamsed66d142019-12-06 09:55:55 +0000972void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
973{
974 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
975
976 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
977 CHECK_VALID_SIZE(inputs.size(), 1);
978
979 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
980 CHECK_VALID_SIZE(outputs.size(), 1);
981
982 auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
983
984 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
985 BOOST_ASSERT(layer != nullptr);
986
987 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
988 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
989
990 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
991 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
992
993 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
994 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
995}
996
Keith Davis4cd29a02019-09-09 14:49:20 +0100997void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
998{
999 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1000
1001 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001002 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001003
1004 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1005 CHECK_VALID_SIZE(outputs.size(), 1);
1006
1007 armnn::IConnectableLayer* layer = nullptr;
1008 auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);
1009
1010 PermuteDescriptor desc;
1011
josh minorba424d22019-11-13 10:55:17 -06001012 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001013 {
1014 armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
1015 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001016 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1017 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001018 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
1019
josh minorba424d22019-11-13 10:55:17 -06001020 // permuteShape assumes Tf/Np permute vectors, we must translate to armnn expected form
1021 // to do so we find the perm vector which would invert what a tf perm vector would do (ex 3,0,1,2 -> 1,2,3,0)
1022 std::vector<unsigned int> armnnPermuteShape(numPermVecElements);
1023 std::vector<unsigned int>::iterator it;
1024 for (unsigned int i = 0u; i < numPermVecElements; ++i)
1025 {
1026 it = std::find(permuteShape.begin(), permuteShape.end(), i);
1027 armnnPermuteShape[i] = static_cast<unsigned int>(std::distance(permuteShape.begin(), it));
1028 }
Kevin May85d92602019-09-27 17:21:06 +01001029
josh minorba424d22019-11-13 10:55:17 -06001030 PermutationVector permutationVector(armnnPermuteShape.data(), permuteTensorInfo.GetNumElements());
1031
1032 desc = PermuteDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001033 }
1034
Keith Davis4cd29a02019-09-09 14:49:20 +01001035 layer = m_Network->AddPermuteLayer(desc, layerName.c_str());
1036
1037 BOOST_ASSERT(layer != nullptr);
1038
1039 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1040 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1041
1042 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1043 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1044
1045 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1046 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1047}
1048
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001049void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
1050{
1051 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1052
1053 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1054 const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();
1055
1056 TransposeConvolution2dDescriptor desc;
1057 desc.m_BiasEnabled = false;
1058 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1059 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1060 desc.m_DataLayout = armnn::DataLayout::NHWC;
1061
1062 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001063 CHECK_VALID_SIZE(inputs.size(), 3);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001064
1065 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1066 CHECK_VALID_SIZE(outputs.size(), 1);
1067
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001068 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001069 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1070
1071 // TfLite uses NHWC tensors
1072 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1073 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1074
1075 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1076 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1077
1078 CalcPadding(inputHeight,
1079 filterHeight,
1080 desc.m_StrideY,
1081 1, // DilationY
1082 desc.m_PadTop,
1083 desc.m_PadBottom,
1084 options->padding);
1085
1086 CalcPadding(inputWidth,
1087 filterWidth,
1088 desc.m_StrideX,
1089 1, // DilationX
1090 desc.m_PadLeft,
1091 desc.m_PadRight,
1092 options->padding);
1093
1094 auto filterTensorAndData = CreateConstTensor(inputs[1],
1095 filterTensorInfo,
1096 armnn::Optional<armnn::PermutationVector&>());
1097
1098 armnn::IConnectableLayer* layer = nullptr;
1099 auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);
1100
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001101 layer = m_Network->AddTransposeConvolution2dLayer(desc,
1102 filterTensorAndData.first,
1103 EmptyOptional(),
1104 layerName.c_str());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001105
1106 BOOST_ASSERT(layer != nullptr);
1107
1108 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1109 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1110
1111 // only the tensors for the inputs are relevant, exclude the const (filter) tensor
1112 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001113 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001114
1115 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1116 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1117}
1118
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001119void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
1120{
1121 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
1122}
1123
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001124void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
1125{
1126 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1127
1128 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1129 CHECK_VALID_SIZE(inputs.size(), 3);
1130
1131 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1132 CHECK_VALID_SIZE(outputs.size(), 1);
1133
1134 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1135 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1136
1137 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1138 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1139
1140 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1141 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1142
1143 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1144 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1145
1146 size_t step = 2;
1147 std::vector<std::pair<unsigned int, unsigned int>> crops;
1148 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1149 {
1150 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1151 }
1152
1153 armnn::BatchToSpaceNdDescriptor desc;
1154 desc.m_BlockShape = blockShape;
1155 desc.m_Crops = crops;
1156 desc.m_DataLayout = armnn::DataLayout::NHWC;
1157
1158 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1159
1160 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
1161 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1162
1163 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1164
1165 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1166 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1167
1168 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1169 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1170}
1171
Matthew Jackson28c94572019-07-18 10:47:03 +01001172void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1173{
1174 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1175
1176 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1177 CHECK_VALID_SIZE(inputs.size(), 1);
1178
1179 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1180 CHECK_VALID_SIZE(outputs.size(), 1);
1181
1182 L2NormalizationDescriptor desc;
1183 desc.m_DataLayout = armnn::DataLayout::NHWC;
1184 auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1185 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1186
1187 BOOST_ASSERT(layer != nullptr);
1188
1189 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1190 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1191
1192 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1193 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1194
1195 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1196 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1197}
1198
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001199void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
1200{
1201 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
1202}
1203
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001204void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1205{
1206 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1207
1208 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1209 CHECK_VALID_SIZE(inputs.size(), 2);
1210
1211 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1212 CHECK_VALID_SIZE(outputs.size(), 1);
1213
1214 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1215 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1216
1217 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1218 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1219
1220 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1221 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1222
1223 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1224 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1225 {
1226 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1227 }
1228 else
1229 {
1230 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1231 }
1232
1233 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1234 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1235}
1236
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001237void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1238{
1239 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1240
1241 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1242 CHECK_VALID_SIZE(inputs.size(), 2);
1243
1244 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1245 CHECK_VALID_SIZE(outputs.size(), 1);
1246
1247 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1248 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1249
1250 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1251 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1252
1253 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1254 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1255
1256 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1257 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1258 {
1259 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1260 }
1261 else
1262 {
1263 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1264 }
1265
1266 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1267 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1268}
1269
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001270void TfLiteParser::ParsePool(size_t subgraphIndex,
1271 size_t operatorIndex,
1272 PoolingAlgorithm algorithm)
1273{
1274 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1275
1276 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1277 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1278
1279 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1280
1281 std::string layerName;
1282
1283 switch (algorithm)
1284 {
1285 case PoolingAlgorithm::Average:
1286 layerName =
1287 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1288 break;
1289 case PoolingAlgorithm::Max:
1290 layerName =
1291 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1292 break;
1293 default:
1294 BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
1295 }
1296
1297 Pooling2dDescriptor desc;
1298
1299 desc.m_PoolType = algorithm;
1300 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1301 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1302 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1303 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1304 desc.m_PaddingMethod = PaddingMethod::Exclude;
1305 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001306 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001307
1308 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1309 CHECK_VALID_SIZE(inputs.size(), 1);
1310 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1311
1312 // assuming input is NHWC
1313 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1314 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1315
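// Convert the TF Lite SAME/VALID padding scheme into explicit pad values for the descriptor; the 1u argument
// is the dilation factor, which is always 1 for pooling.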
Pablo Tellof0bd6832019-04-26 17:58:13 +01001316 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1317 desc.m_PadTop, desc.m_PadBottom, options->padding);
1318 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1319 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001320
1321 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1322 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001323
1324 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1325
1326 BOOST_ASSERT(layer != nullptr);
1327
jimfly01c25411c2018-11-14 17:47:22 +00001328 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1329 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001330
1331 // register the input connection slots for the layer, connections are made after all layers have been created
1332 // only the tensors for the inputs are relevant, exclude the const tensors
1333 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001334 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001335
jimfly01c25411c2018-11-14 17:47:22 +00001336 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001337 // register the output connection slots for the layer, connections are made after all layers have been created
1338 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1339 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1340}
1341
josh minorba424d22019-11-13 10:55:17 -06001342void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
1343{
1344 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1345
1346 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1347 CHECK_VALID_SIZE(inputs.size(), 3);
1348 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1349 CHECK_VALID_SIZE(outputs.size(), 1);
1350
1351 SliceDescriptor desc;
1352
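// TF Lite SLICE passes 'begin' and 'size' as constant input tensors; copy their data out of the flatbuffer
// buffers to populate the SliceDescriptor.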
1353 // set begin tensor info for slice descriptor
1354 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1355 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1356
1357 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1358 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1359
1360 // set size tensor info for slice descriptor
1361 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1362 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1363
1364 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1365 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1366 desc = SliceDescriptor(begin, size);
1367
1368 auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
1369 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
1370
1371 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1372 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1373
1374 // register the input connection slots for the layer, connections are made after all layers have been created
1375 // only the tensors for the inputs are relevant, exclude the const tensors
1376 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1377 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1378
1379 // register the output connection slots for the layer, connections are made after all layers have been created
1380 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1381 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1382}
1383
telsoa01c577f2c2018-08-31 09:22:23 +01001384void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1385{
1386 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1387 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1388 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1389
1390 SoftmaxDescriptor desc;
1391 desc.m_Beta = options->beta;
1392
1393 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1394 CHECK_VALID_SIZE(inputs.size(), 1);
1395 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1396 CHECK_VALID_SIZE(outputs.size(), 1);
1397
1398 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1399 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1400
1401 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1402 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1403
1404 // register the input connection slots for the layer, connections are made after all layers have been created
1405 // only the tensors for the inputs are relevant, exclude the const tensors
1406 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1407 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1408
1409 // register the output connection slots for the layer, connections are made after all layers have been created
1410 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1411 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1412}
1413
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001414void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1415{
1416 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1417
1418 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1419 CHECK_VALID_SIZE(inputs.size(), 3);
1420
1421 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1422 CHECK_VALID_SIZE(outputs.size(), 1);
1423
1424 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1425 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1426
1427 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1428 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1429
1430 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1431 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1432
1433 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1434 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1435
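// The pad list tensor holds a [before, after] pair for each spatial dimension; fold the flat buffer into
// pairs for the descriptor.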
1436 size_t step = 2;
1437 std::vector<std::pair<unsigned int, unsigned int>> padList;
1438 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1439 {
1440 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1441 }
1442
1443 armnn::SpaceToBatchNdDescriptor desc;
1444 desc.m_BlockShape = blockShape;
1445 desc.m_PadList = padList;
1446 desc.m_DataLayout = armnn::DataLayout::NHWC;
1447
1448 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1449
1450 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1451 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1452
1453 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1454
1455 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1456 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1457
1458 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1459 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1460}
1461
telsoa01c577f2c2018-08-31 09:22:23 +01001462armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1463 const armnn::TensorInfo & inputTensorInfo)
1464{
1465 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1466 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1467 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1468
1469 if (inputTensorInfo.GetNumDimensions() > 4)
1470 {
1471 std::stringstream ss;
1472 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1473 << " shape:" << inputTensorInfo.GetShape() << " "
1474 << CHECK_LOCATION().AsString();
1475 throw ParseException(ss.str());
1476 }
1477
1478 if (squeezeDims.empty())
1479 {
1480 squeezeDims.assign(dimensionSequence,
1481 dimensionSequence+inputTensorInfo.GetNumDimensions());
1482 }
1483
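// A dimension is dropped only if it is listed in squeezeDims and its size is 1; dimensions with a size other
// than 1 are kept even when explicitly listed. For example, squeezing {1} on shape [2,1,3,1] gives [2,3,1],
// while an empty squeezeDims squeezes every size-1 dimension and gives [2,3].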
1484 std::vector<uint32_t> outputDims;
1485 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1486 {
1487 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1488 auto currentDimension = inputTensorInfo.GetShape()[i];
1489 if (skipSqueeze || currentDimension != 1)
1490 {
1491 outputDims.push_back(currentDimension);
1492 }
1493 }
1494
1495 if (outputDims.size() > 4)
1496 {
1497 std::stringstream ss;
1498 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1499 << " shape:" << inputTensorInfo.GetShape() << " "
1500 << CHECK_LOCATION().AsString();
1501 throw ParseException(ss.str());
1502 }
1503
1504 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1505 outputDims.data());
1506
1507 // we need to preserve the tensor type and the quantization data as well
1508 TensorInfo outTensorInfo = inputTensorInfo;
1509 outTensorInfo.SetShape(outShape);
1510
1511 return outTensorInfo;
1512}
1513
1514void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1515{
1516 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1517
1518 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1519 CHECK_VALID_SIZE(inputs.size(), 1);
1520
1521 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1522 CHECK_VALID_SIZE(outputs.size(), 1);
1523
1524 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1525 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1526
1527 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1528 armnn::TensorInfo outputTensorInfo =
1529 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1530 inputTensorInfo);
1531
1532 ReshapeDescriptor reshapeDesc;
1533 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1534
1535 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1536 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1537 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1538
1539 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1540 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1541
1542 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1543 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1544}
1545
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001546void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1547{
1548 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1549
1550 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1551 CHECK_VALID_SIZE(inputs.size(), 4);
1552
1553 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1554 CHECK_VALID_SIZE(outputs.size(), 1);
1555
1556 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1557 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1558
1559 StridedSliceDescriptor desc;
1560 desc.m_BeginMask = options->begin_mask;
1561 desc.m_EllipsisMask = options->ellipsis_mask;
1562 desc.m_EndMask = options->end_mask;
1563 desc.m_NewAxisMask = options->new_axis_mask;
1564 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1565 desc.m_DataLayout = armnn::DataLayout::NHWC;
1566
1567 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1568 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1569
1570 std::vector<int> begin(beginTensorInfo.GetNumElements());
1571 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1572
1573 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1574 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1575
1576 std::vector<int> end(endTensorInfo.GetNumElements());
1577 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1578
1579 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1580 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1581
1582 std::vector<int> stride(strideTensorInfo.GetNumElements());
1583 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1584
1585 desc.m_Begin = begin;
1586 desc.m_End = end;
1587 desc.m_Stride = stride;
1588
1589 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1590 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1591
1592 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1593 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1594
1595 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1596 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1597
1598 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1599 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1600}
1601
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001602void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1603{
1604 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1605
1606 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1607 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1608
1609 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1610 CHECK_VALID_SIZE(inputs.size(), 2);
1611
1612 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1613 CHECK_VALID_SIZE(outputs.size(), 1);
1614
1615 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1616 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1617
1618 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1619 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1620
1621 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1622 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1623
1624 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1625 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1626 {
1627 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1628 }
1629 else
1630 {
1631 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1632 }
1633
1634 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1635
1636 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1637 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1638}
1639
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001640void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1641{
1642 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1643
1644 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1645 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1646
1647 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1648 CHECK_VALID_SIZE(inputs.size(), 2);
1649
1650 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1651 CHECK_VALID_SIZE(outputs.size(), 1);
1652
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001653 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1654 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1655
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001656 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1657 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1658
1659 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1660 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1661
1662 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001663 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1664 {
1665 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1666 }
1667 else
1668 {
1669 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1670 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001671
1672 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1673
1674 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1675 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1676}
1677
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001678void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1679{
1680 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1681
1682 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1683 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1684
1685 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1686 CHECK_VALID_SIZE(inputs.size(), 2);
1687
1688 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1689 CHECK_VALID_SIZE(outputs.size(), 1);
1690
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001691 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1692 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1693
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001694 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1695 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1696
1697 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1698 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1699
1700 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001701 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1702 {
1703 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1704 }
1705 else
1706 {
1707 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1708 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001709
1710 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1711
1712 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1713 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1714}
1715
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001716void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1717{
1718 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1719
1720 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1721
1722 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1723 CHECK_VALID_SIZE(outputs.size(), 1);
1724
1725 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1726 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1727
1728 armnn::MeanDescriptor desc;
1729 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1730 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1731 desc.m_Axis = axis;
1732
1733 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1734 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1735
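// keep_dims is not read from the operator options here; it is inferred from the shapes instead: if the
// reduction preserved the input rank, the dimensions were kept.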
1736 desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1739
1740 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1741 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1742
1743 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1744
1745 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1746 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1747
1748 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1749 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1750}
1751
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001752void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1753{
1754 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1755
1756 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1757
1758 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1759 CHECK_VALID_SIZE(outputs.size(), 1);
1760
1761 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1762 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1763
1764 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1765 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1766
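// The paddings tensor holds a [before, after] pair for every input dimension; walk it two entries at a time
// to build the PadDescriptor.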
1767 size_t step = 2;
1768 armnn::PadDescriptor desc;
1769 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1770 {
1771 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1772 }
1773
1774 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1775 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1776
1777 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1778 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1779
1780 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1781 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1782
1783 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1784 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1785}
1786
Sadik Armagan66dedc72019-12-10 16:32:07 +00001787void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
1788{
1789 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1790
1791 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1792 CHECK_VALID_SIZE(inputs.size(), 1);
1793
1794 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1795 CHECK_VALID_SIZE(outputs.size(), 1);
1796
1797 auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
1798
1799 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
1800 BOOST_ASSERT(layer != nullptr);
1801
1802 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1803 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1804
1805 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1806 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1807
1808 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1809 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1810}
Finn Williamsc42c3842019-01-22 14:18:11 +00001811
Sadik Armagan58f39192018-09-17 14:14:39 +01001812void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1813{
Finn Williamsc42c3842019-01-22 14:18:11 +00001814 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001815}
1816
1817void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1818{
Finn Williamsc42c3842019-01-22 14:18:11 +00001819 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1820}
Sadik Armagan58f39192018-09-17 14:14:39 +01001821
Finn Williamsc42c3842019-01-22 14:18:11 +00001822void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1823{
1824 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1825}
1826
Nina Drozd99851762019-04-09 09:37:38 +01001827void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1828{
1829 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1830}
1831
Finn Williamsc42c3842019-01-22 14:18:11 +00001832
1833void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1834{
1835 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001836 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1837 boost::ignore_unused(operatorPtr);
1838
1839 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1840 CHECK_VALID_SIZE(inputs.size(), 1);
1841
1842 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1843 CHECK_VALID_SIZE(outputs.size(), 1);
1844
Finn Williamsc42c3842019-01-22 14:18:11 +00001845 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001846 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001847 activationDesc.m_Function = activationType;
1848
1849 switch (activationType)
1850 {
1851 case ActivationFunction::ReLu:
1852 {
1853 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1854 break;
1855 }
1856 case ActivationFunction::BoundedReLu:
1857 {
1858 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1859 activationDesc.m_A = 6.0f;
1860 activationDesc.m_B = 0.0f;
1861 break;
1862 }
1863 case ActivationFunction::Sigmoid:
1864 {
1865 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1866 break;
1867 }
Nina Drozd99851762019-04-09 09:37:38 +01001868 case ActivationFunction::TanH:
1869 {
1870 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1871 activationDesc.m_A = 1.0f;
1872 activationDesc.m_B = 1.0f;
1873 break;
1874 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001875 default:
1876 {
1877 throw ParseException(
1878 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1879 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1880 }
1881 }
1882
1883 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001884
1885 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1886 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1887
1888 // register the input connection slots for the layer, connections are made after all layers have been created
1889 // only the tensors for the inputs are relevant, exclude the const tensors
1890 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1891 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1892
1893 // register the output connection slots for the layer, connections are made after all layers have been created
1894 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1895 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1896}
Sadikb94967b2018-09-19 15:30:00 +01001897armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1898 const std::vector<int32_t> & targetDimsIn)
1899{
1900 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1901 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1902
1903 if (stretchDim != targetDimsIn.end())
1904 {
1905 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1906 {
1907 throw ParseException(
1908 boost::str(
1909 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1910 }
1911
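// Starting the accumulation at -1 cancels out the single -1 stretch marker, so targetNumElements is the
// product of the explicitly specified dimensions. For example, with 48 input elements and
// targetDimsIn = {2, -1, 3} the product is (-1) * 2 * (-1) * 3 = 6, the stretch dimension becomes
// 48 / 6 = 8 and the output shape is {2, 8, 3}.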
1912 auto targetNumElements =
1913 boost::numeric_cast<unsigned int>(
1914 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1915
1916 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1917 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1918 }
1919
1920 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1921
1922 TensorInfo reshapeInfo = inputTensorInfo;
1923 reshapeInfo.SetShape(outputShape);
1924
1925 return reshapeInfo;
1926}
1927
1928void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1929{
1930 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1931
1932 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001933
1934 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1935 CHECK_VALID_SIZE(outputs.size(), 1);
1936
1937 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1938 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1939
1940 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001941 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1942 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001943 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1944
kevmay0171972a82018-12-17 14:28:03 +00001945 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001946 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1947 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001948 {
1949 std::stringstream ss;
1950 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001951 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001952 << " does not equal output shape "
1953 << actualOutputTensorInfo.GetShape()
1954 << ": "
1955 << CHECK_LOCATION().AsString();
1956 throw ParseException(ss.str());
1957 }
1958
Sadikb94967b2018-09-19 15:30:00 +01001959 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001960 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001961
1962 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1963 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001964 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001965
1966 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1967 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1968
1969 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1970 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1971}
1972
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001973void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1974{
Sadik Armagana3b31f02019-12-05 09:08:53 +00001975 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
1976}
1977
1978void TfLiteParser::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
1979{
1980 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
1981}
1982
1983void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
1984{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001985 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1986
1987 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1988 CHECK_VALID_SIZE(inputs.size(), 2);
1989
1990 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1991 CHECK_VALID_SIZE(outputs.size(), 1);
1992
1993 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1994
1995 // Data for the parsed tensor args (size) must be stored locally.
1996 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1997
1998 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1999 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2000
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002001 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002002 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002003 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002004 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2005 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002006
Sadik Armagana3b31f02019-12-05 09:08:53 +00002007 auto layerName = str(boost::format("Resize:"));
2008
2009 switch (resizeMethod)
2010 {
2011 case ResizeMethod::Bilinear:
2012 {
2013 layerName += str(boost::format("BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002014
2015 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2016 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2017
2018 desc.m_BilinearAlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002019 break;
2020 }
2021 case ResizeMethod::NearestNeighbor:
2022 {
2023 layerName += str(boost::format("NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
2024 break;
2025 }
2026 default:
2027 {
2028 throw ParseException(
2029 boost::str(boost::format("Unexpected ResizeMethod[%1%] when creating layerName "
2030 " %2% ") %static_cast<int>(resizeMethod)% CHECK_LOCATION().AsString()));
2031 }
2032 }
2033
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002034 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002035
2036 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2037 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2038
2039 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2040 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2041
2042 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2043 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2044}
2045
Sadik Armagan479045b2018-10-01 11:51:37 +01002046void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
2047{
2048 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2049
2050 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2051 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
2052
2053 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2054
2055 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2056 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2057 CHECK_VALID_SIZE(outputs.size(), 1);
2058
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002059 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
2060 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01002061
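// A negative concatenation axis counts back from the last dimension; the arithmetic below maps it into
// [0, inputRank), e.g. axis -1 on rank-4 inputs becomes (4 + (-1)) % 4 = 3.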
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002062 const unsigned int concatDimInput = static_cast<unsigned int>(
2063 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01002064
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002065 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2066 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01002067
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002068 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01002069
2070 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2071 {
2072 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
2073
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002074 // This sets up the concatDescriptor view origins
2075 armnnUtils::ProcessConcatInputTensorInfo(
2076 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01002077 }
2078
2079 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01002080 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01002081
2082 BOOST_ASSERT(layer != nullptr);
2083
2084 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2085 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01002086
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002087 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01002088
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002089 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01002090
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002091 // add fused activation layer
2092 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01002093
Sadik Armagan479045b2018-10-01 11:51:37 +01002094 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2095 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2096}
2097
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002098void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
2099{
2100 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2101
2102 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2103 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2104
2105 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2106
2107 FullyConnectedDescriptor desc;
2108 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002109 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002110
2111 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2112 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2113 CHECK_VALID_SIZE(outputs.size(), 1);
2114
2115 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2116
2117 // Fully Connected Layer accepts two dimensional weights input
2118 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2119 if (weightsDimension != 2)
2120 {
2121 throw ParseException(
2122 boost::str(
2123 boost::format(
2124 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
2125 "Node %2%")
2126 % weightsDimension
2127 % CHECK_LOCATION().AsString()));
2128 }
2129
Matteo Martincigh747ef822018-12-18 09:26:39 +00002130 auto filterTensorAndData = CreateConstTensor(inputs[1],
2131 filterTensorInfo,
2132 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002133 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002134 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
2135
2136 if (inputs.size() == 3)
2137 {
2138 desc.m_BiasEnabled = true;
2139 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002140 auto biasTensorAndData = CreateConstTensor(inputs[2],
2141 biasTensorInfo,
2142 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002143 layer = m_Network->AddFullyConnectedLayer(desc,
2144 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002145 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002146 layerName.c_str());
2147 }
2148 else
2149 {
2150 layer = m_Network->AddFullyConnectedLayer(desc,
2151 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002152 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002153 layerName.c_str());
2154 }
2155 BOOST_ASSERT(layer != nullptr);
2156
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002157 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2158
2159 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2160
2161 if (inputTensorInfo.GetNumDimensions() > 2)
2162 {
2163 // Add reshape to flatten to 2D [batch_size, input_size],
2164 // where "input_size" corresponds to the number of inputs to the layer,
2165 // matching the second dimension of weights,
2166 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2167 std::vector<unsigned int> reshapedDimensions(2);
2168 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2169 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2170
2171 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2172 {
2173 throw ParseException(
2174 boost::str(
2175 boost::format(
2176 "Failed to deduce input tensor shape from filter size %1%")
2177 % reshapedDimensions[1]
2178 % CHECK_LOCATION().AsString()));
2179 }
2180
2181 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2182 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2183
2184 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2185 armnn::ReshapeDescriptor reshapeDescriptor;
2186 reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
2187 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, reshapeLayerName.c_str());
2188
2189 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2190 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2191
2192 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2193 }
2194 else
2195 {
2196 // register the input connection slot for the layer
2197 // only the tensors for the inputs are relevant, exclude the const tensors
2198 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2199 }
2200
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002201 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2202 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2203
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002204 // we need to add the activation layer and fortunately we don't need to care about the data layout
2205 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2206 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002207
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002208 // register the output connection slots for the layer, connections are made after all layers have been created
2209 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2210 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2211}
2212
keidav011b3e2ea2019-02-21 10:07:37 +00002213void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
2214{
2215 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2216
2217 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2218
2219 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2220 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2221 CHECK_VALID_SIZE(outputs.size(), 4);
2222
2223 // Obtain custom options from flexbuffers
2224 auto custom_options = operatorPtr->custom_options;
2225 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
2226
2227 // Obtain descriptor information from tf lite
2228 DetectionPostProcessDescriptor desc;
2229 desc.m_MaxDetections = m["max_detections"].AsUInt32();
2230 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
2231 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
2232 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
2233 desc.m_NumClasses = m["num_classes"].AsUInt32();
2234 desc.m_ScaleH = m["h_scale"].AsFloat();
2235 desc.m_ScaleW = m["w_scale"].AsFloat();
2236 desc.m_ScaleX = m["x_scale"].AsFloat();
2237 desc.m_ScaleY = m["y_scale"].AsFloat();
2238
keidav0107d58c72019-02-26 11:57:39 +00002239 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00002240 {
keidav0107d58c72019-02-26 11:57:39 +00002241 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00002242 }
2243 if (!(m["detections_per_class"].IsNull()))
2244 {
2245 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
2246 }
2247
2248 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
2249 {
2250 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
2251 "must be positive and less than or equal to 1.");
2252 }
2253
2254 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
2255 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
2256 armnn::Optional<armnn::PermutationVector&>());
2257
2258 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
2259 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
2260 layerName.c_str());
2261
2262 BOOST_ASSERT(layer != nullptr);
2263
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002264 // The model does not specify the output shapes.
2265 // The output shapes are calculated from the max_detection and max_classes_per_detection.
2266 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
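// The four outputs are, in order: detection boxes [1, numDetectedBox, 4], detection classes
// [1, numDetectedBox], detection scores [1, numDetectedBox] and the number of valid detections [1].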
2267 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2268 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2269 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2270 m_OverridenOutputShapes.push_back({ 1 });
2271
keidav011b3e2ea2019-02-21 10:07:37 +00002272 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
2273 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002274 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00002275 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
2276 }
2277
2278 // Register the input connection slots for the layer, connections are made after all layers have been created
2279 // only the tensors for the inputs are relevant, exclude the const tensors
2280 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2281 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2282
2283 // Register the output connection slots for the layer, connections are made after all layers have been created
2284 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2285 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2286 outputTensorIndexes[1],
2287 outputTensorIndexes[2],
2288 outputTensorIndexes[3]});
2289}
2290
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002291/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2292void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2293{
2294 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2295
2296 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2297 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2298 CHECK_VALID_SIZE(outputs.size(), 1);
2299
2300 if (inputs.size() < 1)
2301 {
2302 throw ParseException("Pack must have at least one input.");
2303 }
2304
2305 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2306 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2307
2308 StackDescriptor desc;
2309 desc.m_Axis = static_cast<uint32_t>(options->axis);
2310 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2311
2312 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2313 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2314 desc.m_InputShape = inputTensorInfo.GetShape();
2315
2316 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2317 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2318
2319 BOOST_ASSERT(layer != nullptr);
2320
2321 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2322 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2323
2324 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2325 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2326
2327 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2328 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2329}
2330
Nina Drozd200e3802019-04-15 09:47:39 +01002331void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2332{
2333 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2334
2335 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2336 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2337
2338 // This unpackAxis indicates the axis to unpack
2339 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2340
2341 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2342 CHECK_VALID_SIZE(inputs.size(), 1);
2343
2344 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002345
2346 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2347 {
2348 throw ParseException(
2349 boost::str(
2350 boost::format(
2351 "The unpack axis: %1% cannot be greater than or equal to "
2352 "the number of input dimension %2% %3%")
2353 % unpackAxis
2354 % inputTensorInfo.GetNumDimensions()
2355 % CHECK_LOCATION().AsString()));
2356 }
2357
Nina Drozd200e3802019-04-15 09:47:39 +01002358 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2359 // If num is not defined, automatically infer from the length of the dimension axis.
2360 if(unpackNum == 0)
2361 {
2362 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2363 }
2364
2365 // If unpack number cannot be inferred and is still zero, throw ParseException.
2366 if(unpackNum == 0)
2367 {
2368 throw ParseException("Number to unpack must greater than zero.");
2369 }
2370
2371 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2372 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2373
2374 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2375 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2376
2377 // Add current input shape to unpackDimSizes
2378 for (unsigned int i = 0; i < inputDimSize; ++i)
2379 {
2380 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2381 }
2382
2383 if (unpackDimSizes[unpackAxis] != unpackNum)
2384 {
2385 throw ParseException("Number to unpack must be the same as length of the dimension to "
2386 "unpack along.");
2387 }
2388
2389 unpackDimSizes[unpackAxis] /= unpackNum;
2390
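// Unpack is lowered to a Splitter that produces unpackNum views of size 1 along unpackAxis; a Reshape added
// further down then drops that axis from each output.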
2391 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2392 for (unsigned int j = 0; j < unpackNum; ++j)
2393 {
2394 // Set the size of the views.
2395 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2396 {
2397 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2398 }
2399 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2400 }
2401
2402 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2403 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2404
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002405 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2406 unpackDimSizes.data());
2407
Nina Drozd200e3802019-04-15 09:47:39 +01002408 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2409 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2410
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002411 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2412 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2413 {
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002414 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002415 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2416 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002417 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002418 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
2419
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002420 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2421 outputTensorInfo.GetDataType(),
2422 outputTensorInfo.GetQuantizationScale(),
2423 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002424 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2425
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002426 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002427
2428 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2429 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2430 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2431 }
Nina Drozd200e3802019-04-15 09:47:39 +01002432}
2433
Nina Drozd0324f482019-04-08 10:52:10 +01002434void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2435{
2436 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2437
2438 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2439 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2440
2441 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2442
    // If the number of splits cannot be inferred and is zero, throw a ParseException.
    if (numSplits == 0)
    {
        throw ParseException("Number of splits must be greater than zero.");
    }
2448
Nina Drozd0324f482019-04-08 10:52:10 +01002449 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2450 CHECK_VALID_SIZE(inputs.size(), 2);
2451 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2452 CHECK_VALID_SIZE(outputs.size(), numSplits);
2453
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002454 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2455 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002456
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002457 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2458 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2459 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2460
2461 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2462 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002463
    // Armnn supports split only along the channel dimension: dimension 1 for NCHW or dimension 3 for NHWC.
2465 if (splitDim == 0 || splitDim == 2)
2466 {
2467 throw ParseException(
2468 boost::str(
2469 boost::format(
2470 "Dimension %1% for split is not supported by Armnn. %2%")
2471 % splitDim
2472 % CHECK_LOCATION().AsString()));
2473 }
2474
2475 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002476 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002477 {
2478 throw ParseException(
2479 boost::str(
2480 boost::format(
2481 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002482 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002483 % inputTensorInfo.GetNumDimensions()
2484 % MaxNumOfTensorDimensions
2485 % CHECK_LOCATION().AsString()));
2486 }
2487
2488 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2489
2490 // Add current input shape to splitterDimSizes
2491 for (unsigned int i = 0; i < inputDimSize; ++i)
2492 {
2493 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2494 }
2495
2496 if (splitterDimSizes[splitDim] % numSplits != 0)
2497 {
        throw ParseException("Number of splits must evenly divide the size of the dimension to split along.");
2499 }
2500 splitterDimSizes[splitDim] /= numSplits;
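    // Illustrative example (not from the model): splitting a [1, 6, 2, 2] NCHW input along
    // dimension 1 with num_splits = 3 gives three views of size {1, 2, 2, 2}.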
2501
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002502 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002503 for (unsigned int j = 0; j < numSplits; ++j)
2504 {
2505 // Set the size of the views.
2506 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2507 {
2508 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2509 }
2510 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2511 }
2512
2513 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2514 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2515
2516 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002517 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002518
Nina Drozd0324f482019-04-08 10:52:10 +01002519 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2520 {
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01002521 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
2522 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01002523 }
2524
2525 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2526 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2527}
2528
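// Illustrative usage (hypothetical variable names): an operator parsed with
// fused_activation_function = RELU6 is typically finished with something like
//   layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
// so that output slots are registered against the activation layer rather than the preceding one.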
Sadik Armagan58f39192018-09-17 14:14:39 +01002529armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2530 unsigned int outputSlot,
2531 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002532{
2533 ActivationDescriptor activationDesc;
2534 std::string layerName = prevLayer->GetName();
2535
2536 switch(activationType)
2537 {
2538 case tflite::ActivationFunctionType_NONE:
2539 {
2540 // this is a no-op: return previous layer
2541 return prevLayer;
2542 }
2543 case tflite::ActivationFunctionType_RELU:
2544 {
2545 activationDesc.m_Function = ActivationFunction::ReLu;
2546 layerName += ":RELU";
2547 break;
2548 }
2549 case tflite::ActivationFunctionType_RELU6:
2550 {
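            // BoundedReLu with A = 6 and B = 0 clamps the output to [0, 6], matching TFLite RELU6.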
2551 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2552 activationDesc.m_A = 6.0f;
2553 activationDesc.m_B = 0.0f;
2554 layerName += ":RELU6";
2555 break;
2556 }
2557 case tflite::ActivationFunctionType_TANH:
2558 {
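            // TanH computes A * tanh(B * x); A = B = 1 gives the standard tanh used by TFLite.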
2559 activationDesc.m_Function = ActivationFunction::TanH;
2560 activationDesc.m_A = 1.0f;
2561 activationDesc.m_B = 1.0f;
2562 layerName += ":TANH";
2563 break;
2564 }
2565
        // Listed here only as a reminder of the other fused activation types that could be supported.
2567 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2568 case tflite::ActivationFunctionType_SIGN_BIT:
2569 default:
2570 {
2571 throw ParseException(
2572 boost::str(
                    boost::format("TfLite parser doesn't support fused activation: "
2574 "%1%/%2% %3% ") %
2575 activationType %
2576 tflite::EnumNameActivationFunctionType(activationType) %
2577 CHECK_LOCATION().AsString()));
2578
2579 }
2580 }
2581
2582 IConnectableLayer* activationLayer =
2583 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2584
2585 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2586 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2587 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2588 return activationLayer;
2589}
2590
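// Illustrative usage (hypothetical file name):
//   TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile("mobilenet_v1.tflite");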
2591TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2592{
2593 if (fileName == nullptr)
2594 {
2595 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2596 CHECK_LOCATION().AsString()));
2597 }
2598 boost::system::error_code errorCode;
2599 boost::filesystem::path pathToFile(fileName);
2600 if (!boost::filesystem::exists(pathToFile, errorCode))
2601 {
2602 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2603 fileName %
2604 errorCode %
2605 CHECK_LOCATION().AsString()));
2606 }
2607 std::ifstream file(fileName, std::ios::binary);
2608 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2609 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2610 fileContent.size());
2611}
2612
2613TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2614{
2615 if (binaryContent == nullptr)
2616 {
2617 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2618 CHECK_LOCATION().AsString()));
2619 }
2620 flatbuffers::Verifier verifier(binaryContent, len);
2621 if (verifier.VerifyBuffer<tflite::Model>() == false)
2622 {
2623 throw ParseException(
            boost::str(boost::format("Buffer doesn't conform to the expected TensorFlow Lite "
2625 "flatbuffers format. size:%1% %2%") %
2626 len %
2627 CHECK_LOCATION().AsString()));
2628 }
2629 return tflite::UnPackModel(binaryContent);
2630}
2631
2632TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2633 size_t subgraphIndex,
2634 size_t operatorIndex)
2635{
2636 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2637
Derek Lambertiff05cc52019-04-26 13:05:17 +01002638 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2639 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002640
2641 size_t inputCount = operatorPtr->inputs.size();
2642 TensorRawPtrVector result(inputCount);
2643 for (size_t i=0; i<inputCount; ++i)
2644 {
2645 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002646 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002647 }
2648 return result;
2649}
2650
2651TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2652 size_t subgraphIndex,
2653 size_t operatorIndex)
2654{
2655 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2656
Derek Lambertiff05cc52019-04-26 13:05:17 +01002657 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2658 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002659
2660 size_t outputCount = operatorPtr->outputs.size();
2661 TensorRawPtrVector result(outputCount);
2662 for (size_t i=0; i<outputCount; ++i)
2663 {
2664 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2665 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002666 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002667 }
2668 return result;
2669}
2670
2671TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2672 size_t subgraphIndex)
2673{
2674 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002675 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002676
Derek Lambertiff05cc52019-04-26 13:05:17 +01002677 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002678 TensorIdRawPtrVector result(inputCount);
2679 for (size_t i=0; i<inputCount; ++i)
2680 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002681 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002682 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002683 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002684 }
2685 return result;
2686}
2687
2688TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2689 size_t subgraphIndex)
2690{
2691 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002692 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002693
Derek Lambertiff05cc52019-04-26 13:05:17 +01002694 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002695 TensorIdRawPtrVector result(outputCount);
2696 for (size_t i=0; i<outputCount; ++i)
2697 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002698 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2699 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002700 }
2701 return result;
2702}
2703
2704std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2705 size_t subgraphIndex,
2706 size_t operatorIndex)
2707{
2708 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002709 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2710 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002711 return operatorPtr->inputs;
2712}
2713
2714std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2715 size_t subgraphIndex,
2716 size_t operatorIndex)
2717{
2718 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002719 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2720 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002721 return operatorPtr->outputs;
2722}
2723
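// Records which tensor index feeds each of the layer's input slots; the slots are wired up through
// m_SubgraphConnections once the producing output slot for each tensor is also registered.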
2724void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2725 size_t operatorIndex,
2726 IConnectableLayer* layer,
2727 const std::vector<unsigned int>& tensorIndexes)
2728{
2729 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2730 BOOST_ASSERT(layer != nullptr);
2731 if (tensorIndexes.size() != layer->GetNumInputSlots())
2732 {
2733 throw ParseException(
2734 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2735 " for subgraph:%3% operator index:%4% %5%") %
2736 tensorIndexes.size() %
2737 layer->GetNumInputSlots() %
2738 subgraphIndex %
2739 operatorIndex %
2740 CHECK_LOCATION().AsString()));
2741 }
2742
2743 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2744 {
2745 unsigned int tensorIndex = tensorIndexes[slotIndex];
2746 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2747 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2748 }
2749}
2750
2751void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2752 size_t operatorIndex,
2753 IConnectableLayer* layer,
2754 const std::vector<unsigned int>& tensorIndexes)
2755{
2756 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2757 BOOST_ASSERT(layer != nullptr);
2758 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2759 {
2760 throw ParseException(
2761 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2762 " for subgraph:%3% operator index:%4% %5%") %
2763 tensorIndexes.size() %
2764 layer->GetNumOutputSlots() %
2765 subgraphIndex %
2766 operatorIndex %
2767 CHECK_LOCATION().AsString()));
2768 }
2769
2770 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2771 {
2772 unsigned int tensorIndex = tensorIndexes[slotIndex];
2773 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2774 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2775 }
2776}
2777
2778void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2779{
2780 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2781
2782 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2783 for (auto const & tensorIdAndPtr : inputs)
2784 {
2785 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2786 IConnectableLayer* layer =
2787 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2788
2789 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2790 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2791
2792 RegisterOutputSlots(subgraphIndex,
2793 VIRTUAL_OPERATOR_ID,
2794 layer,
2795 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2796 }
2797}
2798
2799void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2800{
2801 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2802
2803 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2804 for (auto const & tensorIdAndPtr : outputs)
2805 {
2806 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2807 IConnectableLayer* layer =
2808 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2809
2810 RegisterInputSlots(subgraphIndex,
2811 VIRTUAL_OPERATOR_ID,
2812 layer,
2813 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2814 }
2815}
2816
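// Tensors that have registered consumers but no producing layer at this point (typically weights,
// biases and other constant inputs) are materialised as ConstantLayers so that every consumer slot
// has a producer to connect to.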
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002817void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2818{
2819 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2820
Derek Lambertiff05cc52019-04-26 13:05:17 +01002821 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
    {
        if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
            m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
        {
            TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
            armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
            auto tensorAndData = CreateConstTensor(tensorPtr,
                                                   tensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());

            std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
            IConnectableLayer* layer =
                m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

            layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
            RegisterOutputSlots(subgraphIndex,
                                VIRTUAL_OPERATOR_ID,
                                layer,
                                { tensorIndex });
        }
    }
2848}
2849
telsoa01c577f2c2018-08-31 09:22:23 +01002850// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2851TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2852{
2853 CHECK_BUFFER(model, bufferIndex);
2854 return model->buffers[bufferIndex].get();
2855}
2856
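// The SupportedDataStorage returned alongside the ConstTensor owns the copied (and possibly
// permuted) buffer the tensor points into, so the pair should be kept together until the data has
// been consumed (e.g. by an Add*Layer call).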
Matteo Martincigh747ef822018-12-18 09:26:39 +00002857template<typename T>
2858std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2859TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2860 TfLiteParser::TensorRawPtr tensorPtr,
2861 armnn::TensorInfo& tensorInfo,
2862 armnn::Optional<armnn::PermutationVector&> permutationVector)
2863{
2864 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2865 tensorPtr,
2866 tensorInfo,
2867 permutationVector);
2868 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2869 return std::make_pair(constData.first, std::move(storage));
2870}
2871
telsoa01c577f2c2018-08-31 09:22:23 +01002872std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2873TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002874 armnn::TensorInfo& tensorInfo,
2875 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002876{
2877 CHECK_TENSOR_PTR(tensorPtr);
2878 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2879 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2880
2881 switch (tensorInfo.GetDataType())
2882 {
2883 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002884 return CreateConstTensorAndStoreData<float>(bufferPtr,
2885 tensorPtr,
2886 tensorInfo,
2887 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00002888 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002889 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2890 tensorPtr,
2891 tensorInfo,
2892 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00002893 case armnn::DataType::QSymmS8:
2894 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2895 tensorPtr,
2896 tensorInfo,
2897 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002898 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002899 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2900 tensorPtr,
2901 tensorInfo,
2902 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002903 default:
2904 {
2905 std::stringstream errString;
2906 errString << "Unexpected datatype when creating const tensor: "
2907 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2908 << " shape:" << tensorInfo.GetShape()
2909 << CHECK_LOCATION().AsString();
2910 throw ParseException(errString.str());
2911 }
2912 }
2913}
2914
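// Illustrative usage (hypothetical tensor name and input buffer); the returned pair is the layer
// binding id plus the TensorInfo needed to build the runtime's input tensors:
//   BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, "input");
//   armnn::InputTensors inputTensors{
//       { inputBinding.first, armnn::ConstTensor(inputBinding.second, inputData.data()) } };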
2915BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2916 const std::string& name) const
2917{
2918 CHECK_SUBGRAPH(m_Model, subgraphId);
2919 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2920 for (auto const & input : inputs)
2921 {
2922 if (input.second->name == name)
2923 {
2924 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2925 return std::make_pair(bindingId, ToTensorInfo(input.second));
2926 }
2927 }
2928
2929 std::stringstream bindings;
2930 for (auto const & input : inputs)
2931 {
2932 bindings << "'" << input.second->name << "' ";
2933 }
2934
2935 throw ParseException(
2936 boost::str(
2937 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2938 "Possible inputs are: [%3%] %4%") %
2939 subgraphId %
2940 name %
2941 bindings.str() %
2942 CHECK_LOCATION().AsString()));
2943}
2944
2945BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2946 const std::string& name) const
2947{
2948 CHECK_SUBGRAPH(m_Model, subgraphId);
2949 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002950 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002951 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002952 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002953 if (output.second->name == name)
2954 {
2955 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002956 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2957 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2958 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002959 }
2960 }
2961
2962 std::stringstream bindings;
2963 for (auto const & output : outputs)
2964 {
2965 bindings << "'" << output.second->name << "' ";
2966 }
2967
2968 throw ParseException(
2969 boost::str(
2970 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2971 "Possible outputs are: [%3%] %4%") %
2972 subgraphId %
2973 name %
2974 bindings.str() %
2975 CHECK_LOCATION().AsString()));
2976}
2977
2978size_t TfLiteParser::GetSubgraphCount() const
2979{
2980 return m_Model->subgraphs.size();
2981}
2982
2983std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2984{
2985 CHECK_SUBGRAPH(m_Model, subgraphId);
2986 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2987 std::vector<std::string> result;
2988 result.reserve(inputs.size());
2989 for (auto const & input : inputs)
2990 {
2991 result.push_back(input.second->name);
2992 }
2993 return result;
2994}
2995
2996std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2997{
2998 CHECK_SUBGRAPH(m_Model, subgraphId);
2999 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3000 std::vector<std::string> result;
3001 result.reserve(outputs.size());
3002 for (auto const & output : outputs)
3003 {
3004 result.push_back(output.second->name);
3005 }
3006 return result;
3007}
3008
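// Illustrative end-to-end usage (hypothetical file name):
//   using namespace armnnTfLiteParser;
//   ITfLiteParserPtr parser = ITfLiteParser::Create();
//   armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//   auto inputNames = parser->GetSubgraphInputTensorNames(0);
//   BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, inputNames[0]);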
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003009ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003010{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003011 return new TfLiteParser(options);
telsoa01c577f2c2018-08-31 09:22:23 +01003012}
3013
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003014ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003015{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003016 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
telsoa01c577f2c2018-08-31 09:22:23 +01003017}
3018
3019void ITfLiteParser::Destroy(ITfLiteParser* parser)
3020{
3021 delete parser;
3022}
3023
3024TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
3025: m_FloatData(std::move(data))
3026, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003027, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003028, m_Int32Data(nullptr)
3029{
3030}
3031
3032TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
3033: m_FloatData(nullptr)
3034, m_Uint8Data(std::move(data))
Keith Davisd305e1a2020-01-22 11:57:54 +00003035, m_Int8Data(nullptr)
3036, m_Int32Data(nullptr)
3037{
3038}
3039
3040TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
3041: m_FloatData(nullptr)
3042, m_Uint8Data(nullptr)
3043, m_Int8Data(std::move(data))
telsoa01c577f2c2018-08-31 09:22:23 +01003044, m_Int32Data(nullptr)
3045{
3046}
3047
3048TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
3049: m_FloatData(nullptr)
3050, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003051, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003052, m_Int32Data(std::move(data))
3053{
3054}
3055
3056} // armnnTfLiteParser