blob: 937131ccd7becd6c814a5f8d2f32a6d89b0968a8 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5#include "TfLiteParser.hpp"
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Exceptions.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <boost/filesystem.hpp>
11
12// armnnUtils:
Sadik Armagan479045b2018-10-01 11:51:37 +010013#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010014#include <Permute.hpp>
15#include <VerificationHelpers.hpp>
16
17// The generated code based on the Tf Lite schema:
18#include <schema_generated.h>
19
20#include <boost/core/ignore_unused.hpp>
21#include <boost/assert.hpp>
22#include <boost/format.hpp>
23#include <boost/log/trivial.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010024#include <boost/format.hpp>
25#include <boost/numeric/conversion/cast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026
27#include <fstream>
28#include <algorithm>
29#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010030#include <numeric>
keidav011b3e2ea2019-02-21 10:07:37 +000031#include <flatbuffers/flexbuffers.h>
telsoa01c577f2c2018-08-31 09:22:23 +010032
33using namespace armnn;
34using armnn::CheckLocation;
35namespace armnnTfLiteParser
36{
37namespace
38{
jimfly01c25411c2018-11-14 17:47:22 +000039
telsoa01c577f2c2018-08-31 09:22:23 +010040const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
41
42void CheckSubgraph(const TfLiteParser::ModelPtr & model,
43 size_t subgraphIndex,
44 const CheckLocation & location)
45{
46 if (model.get() == nullptr)
47 {
48 throw ParseException(
49 boost::str(
50 boost::format("%1% was called with invalid (null) model. "
51 "Possible reason is that the model is not yet loaded and Unpack(ed). "
52 "subgraph:%2% at %3%") %
53 location.m_Function %
54 subgraphIndex %
55 location.FileLine()));
56 }
57 else if (subgraphIndex >= model->subgraphs.size())
58 {
59 throw ParseException(
60 boost::str(
61 boost::format("%1% was called with an invalid subgraph index. "
62 "subgraph:%2% at %3%") %
63 location.m_Function %
64 subgraphIndex %
65 location.FileLine()));
66 }
67}
68
69#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
70 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
71
72void CheckModel(const TfLiteParser::ModelPtr & model,
73 size_t subgraphIndex,
74 size_t operatorIndex,
75 const CheckLocation & location)
76{
77 if (model.get() == nullptr)
78 {
79 throw ParseException(
80 boost::str(
81 boost::format("%1% was called with invalid (null) model. "
82 "Possible reason is that the model is not yet loaded and Unpack(ed). "
83 "subgraph:%2% operator:%3% at %4%") %
84 location.m_Function %
85 subgraphIndex %
86 operatorIndex %
87 location.FileLine()));
88 }
89 else if (subgraphIndex >= model->subgraphs.size())
90 {
91 throw ParseException(
92 boost::str(
93 boost::format("%1% was called with an invalid subgraph index. "
94 "subgraph:%2% operator:%3% at %4%") %
95 location.m_Function %
96 subgraphIndex %
97 operatorIndex %
98 location.FileLine()));
99 }
100 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
101 operatorIndex != VIRTUAL_OPERATOR_ID)
102 {
103 throw ParseException(
104 boost::str(
105 boost::format("%1% was called with an invalid operator index. "
106 "subgraph:%2% operator:%3% at %4%") %
107 location.m_Function %
108 subgraphIndex %
109 operatorIndex %
110 location.FileLine()));
111 }
112}
113
114#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
115 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
116
// Validates that tensorIndex is in range for the given subgraph's tensor
// list. Model and subgraph validity are expected to have been established
// by a prior CHECK_MODEL, so those are only asserted here.
void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // not checking model, because I assume CHECK_MODEL already run
    // and checked that. An assert would do.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // also subgraph index should be checked by CHECK_MODEL so
    // I only add an assert here
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // the tensor index is the only one to check here
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                tensorIndex %
                location.FileLine()));
    }
}

// Convenience wrapper that captures the call site via CHECK_LOCATION().
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
146
147void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
148 const CheckLocation & location)
149{
150 if (rawPtr == nullptr)
151 {
152 throw ParseException(
153 boost::str(
154 boost::format("%1% was called with a null tensor pointer. "
155 "at %2%") %
156 location.m_Function %
157 location.FileLine()));
158
159 }
160}
161
162#define CHECK_TENSOR_PTR(TENSOR_PTR) \
163 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
164
165void CheckBuffer(const TfLiteParser::ModelPtr & model,
166 size_t bufferIndex,
167 const CheckLocation & location)
168{
169 if (model.get() == nullptr)
170 {
171 throw ParseException(
172 boost::str(
173 boost::format("%1% was called with invalid (null) model. "
174 "Possible reason is that the model is not yet loaded and Unpack(ed). "
175 "buffer:%2% at %3%") %
176 location.m_Function %
177 bufferIndex %
178 location.FileLine()));
179 }
180 else if (bufferIndex >= model->buffers.size())
181 {
182 throw ParseException(
183 boost::str(
184 boost::format("%1% was called with an invalid buffer index. "
185 "buffer index:%2% at %3%") %
186 location.m_Function %
187 bufferIndex %
188 location.FileLine()));
189 }
190 else if (model->buffers[bufferIndex].get() == nullptr)
191 {
192 throw ParseException(
193 boost::str(
194 boost::format("The buffer #%1% is null. %3%") %
195 bufferIndex %
196 location.AsString()));
197 }
198}
199
200#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
201 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
202
// Validates that the buffer backing a constant tensor is non-null and large
// enough to hold the tensor's data. Throws ParseException otherwise.
void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                bufferId %
                location.AsString()));
    }
    // NOTE(review): data.size() is a byte count, so the GetNumElements()
    // clause compares elements against bytes and looks redundant given the
    // GetNumBytes() clause (bytes >= elements) — confirm before simplifying.
    else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
            tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

// Convenience wrapper that captures the call site via CHECK_LOCATION().
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
230
231bool IsActivationSupported(tflite::ActivationFunctionType activationType)
232{
233 switch(activationType)
234 {
235 case tflite::ActivationFunctionType_NONE:
236 case tflite::ActivationFunctionType_RELU:
237 case tflite::ActivationFunctionType_RELU6:
238 case tflite::ActivationFunctionType_TANH:
239 {
240 return true;
241 }
242 default:
243 {
244 return false;
245 }
246 }
247}
248
// Throws a ParseException when an operator requests a fused activation that
// IsActivationSupported() rejects. Reports both the numeric enum value and
// its schema name. (BUGFIX: corrected the typo "suppport" in the message.)
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
266
267
268std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
269{
270 std::vector<unsigned int> result;
271 result.reserve(in.size());
272 for (auto & i : in)
273 {
274 result.push_back(CHECKED_NON_NEGATIVE(i));
275 }
276 return result;
277}
278
279void CalcPadding(uint32_t inputSize,
280 uint32_t filterSize,
281 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100282 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100283 uint32_t& paddingFront,
284 uint32_t& paddingBack,
285 tflite::Padding padding)
286{
287 paddingFront = 0;
288 paddingBack = 0;
289 if (padding == tflite::Padding_SAME)
290 {
291 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100292 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
293 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100294 if (temp > inputSize)
295 {
296 paddingFront = (temp - inputSize) / 2;
297 paddingBack = (temp - inputSize) - paddingFront;
298 }
299 }
300}
301
// Translates a TfLite tensor (plus an explicit shape) into an armnn::TensorInfo,
// mapping the data type and any per-tensor quantization parameters.
// Throws ParseException for unsupported TfLite tensor types.
armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                    tensorPtr->type %
                    tflite::EnumNameTensorType(tensorPtr->type) %
                    tensorPtr->name %
                    location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        // Only per-tensor quantization (zero or one scale/zero-point) is accepted.
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32
            // but this is what we support at the moment in ArmNN
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    // A scalar (rank-0) tensor is represented as a single-element 1D shape.
    std::vector<unsigned int> safeShape = shapes;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
    }

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(safeShape.size()),
                             safeShape.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}
366
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000367armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
368{
369 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
370 return ToTensorInfo(tensorPtr, dimensions);
371}
372
// Copies a constant tensor's data out of the flatbuffer into freshly owned
// storage, optionally permuting it (e.g. to rearrange weight layouts).
// When a non-empty permutation vector is supplied, tensorInfo is updated
// in place to the permuted shape. Returns the ConstTensor together with the
// buffer that owns its data (the caller must keep the buffer alive).
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        // Permute mutates tensorInfo first so the copy below targets the
        // permuted shape.
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
400
telsoa01c577f2c2018-08-31 09:22:23 +0100401armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
402{
403 // generate the binding id by shifting the tensor id by 8 bit
404 // and add the subgraph id, which allows 256 subgraphs
405 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
406}
407
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000408bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
409{
410 const unsigned int actualSize = actual.GetNumDimensions();
411 if (actualSize != expected.size())
412 {
413 return false;
414 }
415
416 for (unsigned int i = 0u; i < actualSize; i++)
417 {
418 if (expected[i] < 0 ||
419 actual[i] != static_cast<unsigned int>(expected[i]))
420 {
421 return false;
422 }
423 }
424
425 return true;
426}
427
telsoa01c577f2c2018-08-31 09:22:23 +0100428} // <anonymous>
429
// Constructs the parser. The dispatch table is sized to cover every builtin
// operator code and pre-filled with ParseUnsupportedOperator, so any opcode
// without an explicit registration below falls through to the unsupported
// handler (which either throws or emits a StandInLayer, depending on
// m_Options).
TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]   =  &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] =  &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]     =  &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]           =  &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] =  &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]            =  &TfLiteParser::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]   =  &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]          =  &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]  =  &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]       =  &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]           =  &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]           =  &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]              =  &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]             =  &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]           =  &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]   =  &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           =  &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] =  &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           =  &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]     =  &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]               =  &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_ADD]               =  &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]               =  &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]              =  &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]              =  &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]               =  &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]             =  &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]              =  &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]         =  &TfLiteParser::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]    =  &TfLiteParser::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]            =  &TfLiteParser::ParseUnpack;

    // register supported custom operators (looked up by custom_code string)
    m_CustomParserFunctions["TFLite_Detection_PostProcess"]      =  &TfLiteParser::ParseDetectionPostProcess;
}
471
// Clears all state accumulated by a previous parse (network, model,
// pending slot connections) so the parser instance can be reused.
void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}
478
// Inserts a Reshape layer in front of a binary elementwise layer so that the
// lower-rank input is padded with leading 1-dimensions up to the rank of the
// other input (TfLite-style broadcasting). The reshape feeds the layer's
// input slot 0; the higher-rank tensor is registered as the consumer of
// input slot 1.
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    // Ensure "reshapedInputId" refers to the LOWER-rank input: swap the two
    // ids (and their infos) if input 1 is the one that needs reshaping.
    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    // Right-align the existing dims into a vector of 1s, e.g. [3,4] with
    // target rank 4 becomes [1,1,3,4].
    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
535
// Loads a flatbuffer model from disk and converts it to an armnn network.
// Resets any state left over from a previous parse first.
INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}
542
// Converts an in-memory flatbuffer model to an armnn network.
// Resets any state left over from a previous parse first.
INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
549
// Converts the already-loaded m_Model into an armnn INetwork:
// 1) dispatches every operator to its registered parser (collecting, rather
//    than aborting on, per-operator ParseExceptions),
// 2) sets up input/output/constant layers per subgraph, and
// 3) finally wires each recorded producer output slot to all of its
//    registered consumer input slots.
// Only single-subgraph models are supported.
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubgraphPtr const & subgraph : m_Model->subgraphs)
    {
        // one slot-bookkeeping entry per tensor in this subgraph
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                                          builtinCode %
                                          tflite::BuiltinOperator_MAX %
                                          subgraphIndex %
                                          operatorIndex %
                                          CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                // keep going so all broken operators are reported at once
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                    inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                    ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}
645
// Records `slot` as the (single) producer of the given tensor. Throws
// ParseException if another layer has already claimed the tensor, since a
// tensor may only be written by one output slot.
void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
                boost::format("Another layer has already registered itself as the producer of "
                              "subgraph:%1% tensor:%2% %3%") %
                              subgraphIndex %
                              tensorIndex %
                              CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}
669
670void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
671 size_t tensorIndex,
672 armnn::IInputSlot* slot)
673{
674 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
675 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
676 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
677
678 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
679 tensorSlots.inputSlots.push_back(slot);
680}
681
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100682void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
683{
684 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
685
686 // NOTE: By default we presume the custom operator is not supported
687 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
688
689 // Identify custom code defined for custom operator
690 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
691 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
692
693 // Find parser function that correspondes to custom code (if any)
694 auto iterator = m_CustomParserFunctions.find(customCode);
695 if (iterator != m_CustomParserFunctions.end())
696 {
697 customParserFunction = iterator->second;
698 }
699
700 // Run parser function
701 (this->*customParserFunction)(subgraphIndex, operatorIndex);
702}
703
// Fallback handler for operators with no registered parser. By default it
// throws; when the m_StandInLayerForUnsupported option is set it instead
// inserts a non-executable StandInLayer with matching input/output counts so
// the rest of the graph can still be constructed.
void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
            boost::str(
                boost::format("Operator not supported. "
                              "subgraph:%1% operator:%2% "
                              "opcode_index:%3% opcode:%4% / %5% %6%") %
                              subgraphIndex %
                              operatorIndex %
                              opcodeIndex %
                              opcode %
                              tflite::EnumNameBuiltinOperator(opcode) %
                              CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());

    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
    }

    auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}
751
// Parses a TfLite CONV_2D operator into an armnn Convolution2d layer
// (NHWC data layout, OHWI weights), computing SAME/VALID padding from the
// input/filter shapes, handling an optional bias (third input) and an
// optional fused activation appended after the convolution.
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // inputs: data, weights, and optionally bias
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // no permutation needed: OHWI weights are used as-is
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // outputs are registered against the activation layer (if any) so the
    // fused activation sits between the convolution and its consumers
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
834
// Converts a TfLite DEPTHWISE_CONV_2D operator into an ArmNN DepthwiseConvolution2d
// layer. Handles an optional constant bias (third input) and appends any fused
// activation as a separate layer.
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // depth_multiplier is only validated here; its value is implicit in the
    // filter reshape below (M = filter channels / input channels).
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // The filter is permuted into ArmNN's [M, I, H, W] layout while being copied.
    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        // A third input means a constant bias tensor is present.
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // A fused activation layer (if any) becomes the endpoint whose output slot is registered below.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
921
Keith Davis4cd29a02019-09-09 14:49:20 +0100922void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
923{
924 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
925
926 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +0100927 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +0100928
929 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
930 CHECK_VALID_SIZE(outputs.size(), 1);
931
932 armnn::IConnectableLayer* layer = nullptr;
933 auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);
934
935 PermuteDescriptor desc;
936
Kevin May85d92602019-09-27 17:21:06 +0100937 if(inputs.size() == 2)
938 {
939 armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
940 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
941
942 std::vector<unsigned int> permuteShape(permuteTensorInfo.GetNumElements());
943 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
944
945 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
946
947 desc = PermuteDescriptor(permutationVector);
948 }
949
Keith Davis4cd29a02019-09-09 14:49:20 +0100950 layer = m_Network->AddPermuteLayer(desc, layerName.c_str());
951
952 BOOST_ASSERT(layer != nullptr);
953
954 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
955 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
956
957 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
958 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
959
960 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
961 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
962}
963
// Converts a TfLite TRANSPOSE_CONV operator into an ArmNN TransposeConvolution2d
// layer. Three inputs are expected; only inputs[1] (filter weights, constant) and
// inputs[2] (the data tensor) are consumed here. inputs[0] is presumably the
// requested output shape per the TfLite schema — unused on this path; TODO confirm.
// No bias and no fused activation are handled.
void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Note: the actual data input is inputs[2], not inputs[0].
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    // Filter is taken as-is (no permutation requested).
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);

    layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                      filterTensorAndData.first,
                                                      EmptyOptional(),
                                                      layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1033
// AVERAGE_POOL_2D delegates to the shared pooling handler.
void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1038
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001039void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
1040{
1041 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1042
1043 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1044 CHECK_VALID_SIZE(inputs.size(), 3);
1045
1046 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1047 CHECK_VALID_SIZE(outputs.size(), 1);
1048
1049 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1050 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1051
1052 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1053 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1054
1055 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1056 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1057
1058 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1059 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1060
1061 size_t step = 2;
1062 std::vector<std::pair<unsigned int, unsigned int>> crops;
1063 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1064 {
1065 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1066 }
1067
1068 armnn::BatchToSpaceNdDescriptor desc;
1069 desc.m_BlockShape = blockShape;
1070 desc.m_Crops = crops;
1071 desc.m_DataLayout = armnn::DataLayout::NHWC;
1072
1073 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1074
1075 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
1076 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1077
1078 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1079
1080 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1081 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1082
1083 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1084 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1085}
1086
Matthew Jackson28c94572019-07-18 10:47:03 +01001087void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1088{
1089 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1090
1091 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1092 CHECK_VALID_SIZE(inputs.size(), 1);
1093
1094 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1095 CHECK_VALID_SIZE(outputs.size(), 1);
1096
1097 L2NormalizationDescriptor desc;
1098 desc.m_DataLayout = armnn::DataLayout::NHWC;
1099 auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1100 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1101
1102 BOOST_ASSERT(layer != nullptr);
1103
1104 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1105 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1106
1107 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1108 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1109
1110 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1111 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1112}
1113
// MAX_POOL_2D delegates to the shared pooling handler.
void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1118
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001119void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1120{
1121 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1122
1123 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1124 CHECK_VALID_SIZE(inputs.size(), 2);
1125
1126 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1127 CHECK_VALID_SIZE(outputs.size(), 1);
1128
1129 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1130 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1131
1132 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1133 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1134
1135 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1136 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1137
1138 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1139 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1140 {
1141 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1142 }
1143 else
1144 {
1145 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1146 }
1147
1148 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1149 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1150}
1151
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001152void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1153{
1154 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1155
1156 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1157 CHECK_VALID_SIZE(inputs.size(), 2);
1158
1159 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1160 CHECK_VALID_SIZE(outputs.size(), 1);
1161
1162 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1163 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1164
1165 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1166 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1167
1168 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1169 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1170
1171 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1172 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1173 {
1174 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1175 }
1176 else
1177 {
1178 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1179 }
1180
1181 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1182 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1183}
1184
// Shared handler for TfLite AVERAGE_POOL_2D / MAX_POOL_2D operators.
// 'algorithm' selects the ArmNN pooling type; any fused activation is appended
// as an extra layer after the pooling layer.
void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            // Callers only pass Average or Max; anything else is a programming error.
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    // Padding values are excluded from averages; output shape rounds down.
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // Pooling has no dilation, hence the fixed 1u dilation argument.
    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // A fused activation layer (if any) becomes the endpoint whose output slot is registered below.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1256
telsoa01c577f2c2018-08-31 09:22:23 +01001257void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1258{
1259 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1260 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1261 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1262
1263 SoftmaxDescriptor desc;
1264 desc.m_Beta = options->beta;
1265
1266 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1267 CHECK_VALID_SIZE(inputs.size(), 1);
1268 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1269 CHECK_VALID_SIZE(outputs.size(), 1);
1270
1271 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1272 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1273
1274 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1275 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1276
1277 // register the input connection slots for the layer, connections are made after all layers have been created
1278 // only the tensors for the inputs are relevant, exclude the const tensors
1279 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1280 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1281
1282 // register the output connection slots for the layer, connections are made after all layers have been created
1283 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1284 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1285}
1286
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001287void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1288{
1289 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1290
1291 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1292 CHECK_VALID_SIZE(inputs.size(), 3);
1293
1294 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1295 CHECK_VALID_SIZE(outputs.size(), 1);
1296
1297 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1298 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1299
1300 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1301 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1302
1303 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1304 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1305
1306 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1307 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1308
1309 size_t step = 2;
1310 std::vector<std::pair<unsigned int, unsigned int>> padList;
1311 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1312 {
1313 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1314 }
1315
1316 armnn::SpaceToBatchNdDescriptor desc;
1317 desc.m_BlockShape = blockShape;
1318 desc.m_PadList = padList;
1319 desc.m_DataLayout = armnn::DataLayout::NHWC;
1320
1321 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1322
1323 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1324 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1325
1326 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1327
1328 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1329 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1330
1331 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1332 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1333}
1334
telsoa01c577f2c2018-08-31 09:22:23 +01001335armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1336 const armnn::TensorInfo & inputTensorInfo)
1337{
1338 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1339 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1340 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1341
1342 if (inputTensorInfo.GetNumDimensions() > 4)
1343 {
1344 std::stringstream ss;
1345 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1346 << " shape:" << inputTensorInfo.GetShape() << " "
1347 << CHECK_LOCATION().AsString();
1348 throw ParseException(ss.str());
1349 }
1350
1351 if (squeezeDims.empty())
1352 {
1353 squeezeDims.assign(dimensionSequence,
1354 dimensionSequence+inputTensorInfo.GetNumDimensions());
1355 }
1356
1357 std::vector<uint32_t> outputDims;
1358 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1359 {
1360 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1361 auto currentDimension = inputTensorInfo.GetShape()[i];
1362 if (skipSqueeze || currentDimension != 1)
1363 {
1364 outputDims.push_back(currentDimension);
1365 }
1366 }
1367
1368 if (outputDims.size() > 4)
1369 {
1370 std::stringstream ss;
1371 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1372 << " shape:" << inputTensorInfo.GetShape() << " "
1373 << CHECK_LOCATION().AsString();
1374 throw ParseException(ss.str());
1375 }
1376
1377 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1378 outputDims.data());
1379
1380 // we need to preserve the tensor type and the quantization data as well
1381 TensorInfo outTensorInfo = inputTensorInfo;
1382 outTensorInfo.SetShape(outShape);
1383
1384 return outTensorInfo;
1385}
1386
1387void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1388{
1389 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1390
1391 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1392 CHECK_VALID_SIZE(inputs.size(), 1);
1393
1394 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1395 CHECK_VALID_SIZE(outputs.size(), 1);
1396
1397 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1398 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1399
1400 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1401 armnn::TensorInfo outputTensorInfo =
1402 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1403 inputTensorInfo);
1404
1405 ReshapeDescriptor reshapeDesc;
1406 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1407
1408 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1409 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1410 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1411
1412 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1413 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1414
1415 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1416 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1417}
1418
// Converts a TfLite STRIDED_SLICE operator. Inputs 1-3 are constant tensors
// holding the begin, end and stride vectors; the mask fields come from the
// operator's builtin options.
void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();

    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // begin/end/stride are copied byte-for-byte out of the constant buffers;
    // assumes the buffer element size matches sizeof(int) — TODO confirm.
    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Slot registration only; actual connections happen after all layers exist.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1474
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001475void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1476{
1477 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1478
1479 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1480 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1481
1482 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1483 CHECK_VALID_SIZE(inputs.size(), 2);
1484
1485 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1486 CHECK_VALID_SIZE(outputs.size(), 1);
1487
1488 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1489 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1490
1491 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1492 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1493
1494 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1495 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1496
1497 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1498 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1499 {
1500 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1501 }
1502 else
1503 {
1504 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1505 }
1506
1507 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1508
1509 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1510 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1511}
1512
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001513void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1514{
1515 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1516
1517 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1518 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1519
1520 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1521 CHECK_VALID_SIZE(inputs.size(), 2);
1522
1523 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1524 CHECK_VALID_SIZE(outputs.size(), 1);
1525
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001526 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1527 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1528
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001529 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1530 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1531
1532 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1533 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1534
1535 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001536 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1537 {
1538 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1539 }
1540 else
1541 {
1542 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1543 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001544
1545 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1546
1547 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1548 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1549}
1550
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001551void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1552{
1553 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1554
1555 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1556 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1557
1558 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1559 CHECK_VALID_SIZE(inputs.size(), 2);
1560
1561 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1562 CHECK_VALID_SIZE(outputs.size(), 1);
1563
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001564 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1565 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1566
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001567 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1568 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1569
1570 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1571 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1572
1573 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001574 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1575 {
1576 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1577 }
1578 else
1579 {
1580 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1581 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001582
1583 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1584
1585 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1586 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1587}
1588
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001589void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1590{
1591 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1592
1593 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1594
1595 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1596 CHECK_VALID_SIZE(outputs.size(), 1);
1597
1598 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1599 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1600
1601 armnn::MeanDescriptor desc;
1602 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1603 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1604 desc.m_Axis = axis;
1605
1606 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1607 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1608
1609 desc.m_KeepDims =
1610 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1611 true : false;
1612
1613 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1614 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1615
1616 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1617
1618 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1619 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1620
1621 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1622 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1623}
1624
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001625void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1626{
1627 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1628
1629 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1630
1631 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1632 CHECK_VALID_SIZE(outputs.size(), 1);
1633
1634 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1635 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1636
1637 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1638 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1639
1640 size_t step = 2;
1641 armnn::PadDescriptor desc;
1642 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1643 {
1644 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1645 }
1646
1647 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1648 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1649
1650 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1651 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1652
1653 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1654 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1655
1656 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1657 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1658}
1659
Finn Williamsc42c3842019-01-22 14:18:11 +00001660
Sadik Armagan58f39192018-09-17 14:14:39 +01001661void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1662{
Finn Williamsc42c3842019-01-22 14:18:11 +00001663 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001664}
1665
1666void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1667{
Finn Williamsc42c3842019-01-22 14:18:11 +00001668 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1669}
Sadik Armagan58f39192018-09-17 14:14:39 +01001670
Finn Williamsc42c3842019-01-22 14:18:11 +00001671void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1672{
1673 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1674}
1675
Nina Drozd99851762019-04-09 09:37:38 +01001676void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1677{
1678 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1679}
1680
Finn Williamsc42c3842019-01-22 14:18:11 +00001681
1682void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1683{
1684 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001685 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1686 boost::ignore_unused(operatorPtr);
1687
1688 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1689 CHECK_VALID_SIZE(inputs.size(), 1);
1690
1691 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1692 CHECK_VALID_SIZE(outputs.size(), 1);
1693
Finn Williamsc42c3842019-01-22 14:18:11 +00001694 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001695 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001696 activationDesc.m_Function = activationType;
1697
1698 switch (activationType)
1699 {
1700 case ActivationFunction::ReLu:
1701 {
1702 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1703 break;
1704 }
1705 case ActivationFunction::BoundedReLu:
1706 {
1707 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1708 activationDesc.m_A = 6.0f;
1709 activationDesc.m_B = 0.0f;
1710 break;
1711 }
1712 case ActivationFunction::Sigmoid:
1713 {
1714 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1715 break;
1716 }
Nina Drozd99851762019-04-09 09:37:38 +01001717 case ActivationFunction::TanH:
1718 {
1719 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1720 activationDesc.m_A = 1.0f;
1721 activationDesc.m_B = 1.0f;
1722 break;
1723 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001724 default:
1725 {
1726 throw ParseException(
1727 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1728 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1729 }
1730 }
1731
1732 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001733
1734 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1735 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1736
1737 // register the input connection slots for the layer, connections are made after all layers have been created
1738 // only the tensors for the inputs are relevant, exclude the const tensors
1739 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1740 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1741
1742 // register the output connection slots for the layer, connections are made after all layers have been created
1743 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1744 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1745}
/// Computes the output TensorInfo of a RESHAPE from the requested target
/// dimensions. At most one target dimension may be -1 ("stretch"); its size
/// is deduced so the total element count matches the input tensor.
/// @param inputTensorInfo  info of the tensor being reshaped
/// @param targetDimsIn     requested dimensions, possibly containing one -1
/// @return a copy of inputTensorInfo with only the shape replaced
/// @throws ParseException if more than one -1 is present
armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
                                                     const std::vector<int32_t> & targetDimsIn)
{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        // A second -1 after the first one makes the shape ambiguous.
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                boost::str(
                    boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
        }

        // Accumulating with an initial value of -1 multiplies in the single -1
        // stretch entry, so the product of the known dimensions comes out
        // positive: (-1) * (-1) * d0 * d1 * ... = d0 * d1 * ...
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        // Replace the -1 entry with the deduced dimension.
        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    // Copy the input info so data type and quantization settings carry over;
    // only the shape changes.
    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
1776
/// Parses a TfLite RESHAPE operator into an ArmNN Reshape layer.
/// The target shape comes from the operator's builtin new_shape option;
/// when a second (shape) input tensor is present, the computed shape is
/// validated against the declared output shape.
void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    // Resolve any -1 entry in new_shape against the input element count.
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);

    // Check for valid input size and that reshape parameters equal output shape
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    // The layer advertises the computed shape, not the (possibly absent)
    // declared one.
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1821
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001822void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1823{
1824 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1825
1826 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1827 CHECK_VALID_SIZE(inputs.size(), 2);
1828
1829 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1830 CHECK_VALID_SIZE(outputs.size(), 1);
1831
1832 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1833
1834 // Data for the parsed tensor args (size) must be stored locally.
1835 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1836
1837 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1838 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1839
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001840 ResizeDescriptor desc;
1841 desc.m_Method = armnn::ResizeMethod::Bilinear;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001842 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001843 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1844 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001845
1846 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001847 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001848
1849 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1850 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1851
1852 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1853 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1854
1855 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1856 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1857}
1858
/// Parses a TfLite CONCATENATION operator into an ArmNN Concat layer.
/// Builds an OriginsDescriptor whose per-view origins are accumulated along
/// the concatenation axis, then appends any fused activation.
void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
    uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();

    // Normalise the (possibly negative) TfLite axis into [0, inputRank).
    const unsigned int concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

    // Running offset along the concat axis, advanced per view by the helper.
    unsigned int mergeDimOrigin = 0;

    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);

        // This set up concatDescriptor view origin
        armnnUtils::ProcessConcatInputTensorInfo(
            inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // All views feed the concat layer's input slots in order.
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});

    // add fused activation layer
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1910
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001911void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1912{
1913 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1914
1915 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1916 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1917
1918 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1919
1920 FullyConnectedDescriptor desc;
1921 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001922 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001923
1924 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1925 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1926 CHECK_VALID_SIZE(outputs.size(), 1);
1927
1928 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1929
1930 // Fully Connected Layer accepts two dimensional weights input
1931 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1932 if (weightsDimension != 2)
1933 {
1934 throw ParseException(
1935 boost::str(
1936 boost::format(
1937 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1938 "Node %2%")
1939 % weightsDimension
1940 % CHECK_LOCATION().AsString()));
1941 }
1942
Matteo Martincigh747ef822018-12-18 09:26:39 +00001943 auto filterTensorAndData = CreateConstTensor(inputs[1],
1944 filterTensorInfo,
1945 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001946 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001947 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1948
1949 if (inputs.size() == 3)
1950 {
1951 desc.m_BiasEnabled = true;
1952 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001953 auto biasTensorAndData = CreateConstTensor(inputs[2],
1954 biasTensorInfo,
1955 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001956 layer = m_Network->AddFullyConnectedLayer(desc,
1957 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001958 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001959 layerName.c_str());
1960 }
1961 else
1962 {
1963 layer = m_Network->AddFullyConnectedLayer(desc,
1964 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001965 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001966 layerName.c_str());
1967 }
1968 BOOST_ASSERT(layer != nullptr);
1969
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001970 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1971
1972 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1973
1974 if (inputTensorInfo.GetNumDimensions() > 2)
1975 {
1976 // Add reshape to flatten to 2D [batch_size, input_size],
1977 // where "input_size" corresponds to the number of inputs to the layer,
1978 // matching the second dimension of weights,
1979 // and "batch_size" is calculated by dividing the number of elements by "input_size".
1980 std::vector<unsigned int> reshapedDimensions(2);
1981 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
1982 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
1983
1984 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
1985 {
1986 throw ParseException(
1987 boost::str(
1988 boost::format(
1989 "Failed to deduce input tensor shape from filter size %1%")
1990 % reshapedDimensions[1]
1991 % CHECK_LOCATION().AsString()));
1992 }
1993
1994 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
1995 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
1996
1997 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
1998 armnn::ReshapeDescriptor desc;
1999 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2000 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2001
2002 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2003 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2004
2005 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2006 }
2007 else
2008 {
2009 // register the input connection slot for the layer
2010 // only the tensors for the inputs are relevant, exclude the const tensors
2011 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2012 }
2013
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002014 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2015 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2016
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002017 // we need to add the activation layer and fortunately we don't need to care about the data layout
2018 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2019 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002020
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002021 // register the output connection slots for the layer, connections are made after all layers have been created
2022 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2023 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2024}
2025
/// Parses the TfLite custom operator TFLite_Detection_PostProcess into an
/// ArmNN DetectionPostProcess layer. Operator parameters arrive as a
/// flexbuffer map in custom_options; input 2 holds the constant anchor boxes.
/// The model declares no output shapes, so they are derived from the
/// descriptor and recorded in m_OverridenOutputShapes.
void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two keys are optional in the flexbuffer map; the descriptor
    // defaults are kept when they are absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
                                                                       layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });  // detection boxes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });     // detection classes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });     // detection scores
    m_OverridenOutputShapes.push_back({ 1 });                     // number of detections

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
2103
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002104/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2105void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2106{
2107 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2108
2109 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2110 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2111 CHECK_VALID_SIZE(outputs.size(), 1);
2112
2113 if (inputs.size() < 1)
2114 {
2115 throw ParseException("Pack must have at least one input.");
2116 }
2117
2118 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2119 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2120
2121 StackDescriptor desc;
2122 desc.m_Axis = static_cast<uint32_t>(options->axis);
2123 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2124
2125 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2126 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2127 desc.m_InputShape = inputTensorInfo.GetShape();
2128
2129 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2130 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2131
2132 BOOST_ASSERT(layer != nullptr);
2133
2134 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2135 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2136
2137 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2138 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2139
2140 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2141 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2142}
2143
Nina Drozd200e3802019-04-15 09:47:39 +01002144void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2145{
2146 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2147
2148 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2149 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2150
2151 // This unpackAxis indicates the axis to unpack
2152 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2153
2154 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2155 CHECK_VALID_SIZE(inputs.size(), 1);
2156
2157 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002158
2159 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2160 {
2161 throw ParseException(
2162 boost::str(
2163 boost::format(
2164 "The unpack axis: %1% cannot be greater than or equal to "
2165 "the number of input dimension %2% %3%")
2166 % unpackAxis
2167 % inputTensorInfo.GetNumDimensions()
2168 % CHECK_LOCATION().AsString()));
2169 }
2170
Nina Drozd200e3802019-04-15 09:47:39 +01002171 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2172 // If num is not defined, automatically infer from the length of the dimension axis.
2173 if(unpackNum == 0)
2174 {
2175 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2176 }
2177
2178 // If unpack number cannot be inferred and is still zero, throw ParseException.
2179 if(unpackNum == 0)
2180 {
2181 throw ParseException("Number to unpack must greater than zero.");
2182 }
2183
2184 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2185 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2186
2187 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2188 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2189
2190 // Add current input shape to unpackDimSizes
2191 for (unsigned int i = 0; i < inputDimSize; ++i)
2192 {
2193 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2194 }
2195
2196 if (unpackDimSizes[unpackAxis] != unpackNum)
2197 {
2198 throw ParseException("Number to unpack must be the same as length of the dimension to "
2199 "unpack along.");
2200 }
2201
2202 unpackDimSizes[unpackAxis] /= unpackNum;
2203
2204 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2205 for (unsigned int j = 0; j < unpackNum; ++j)
2206 {
2207 // Set the size of the views.
2208 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2209 {
2210 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2211 }
2212 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2213 }
2214
2215 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2216 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2217
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002218 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2219 unpackDimSizes.data());
2220
Nina Drozd200e3802019-04-15 09:47:39 +01002221 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2222 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2223
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002224 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2225 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2226 {
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002227 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002228 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2229 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002230 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002231 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2232
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002233 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2234 outputTensorInfo.GetDataType(),
2235 outputTensorInfo.GetQuantizationScale(),
2236 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002237 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2238
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002239 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002240
2241 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2242 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2243 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2244 }
Nina Drozd200e3802019-04-15 09:47:39 +01002245}
2246
Nina Drozd0324f482019-04-08 10:52:10 +01002247void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2248{
2249 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2250
2251 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2252 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2253
2254 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2255
Nina Drozd200e3802019-04-15 09:47:39 +01002256 // If number of splits cannot be inferred and is zero, throw ParseException.
2257 if(numSplits == 0)
2258 {
2259 throw ParseException("Number to splits must greater than zero.");
2260 }
2261
Nina Drozd0324f482019-04-08 10:52:10 +01002262 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2263 CHECK_VALID_SIZE(inputs.size(), 2);
2264 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2265 CHECK_VALID_SIZE(outputs.size(), numSplits);
2266
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002267 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2268 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002269
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002270 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2271 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2272 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2273
2274 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2275 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002276
2277 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2278 if (splitDim == 0 || splitDim == 2)
2279 {
2280 throw ParseException(
2281 boost::str(
2282 boost::format(
2283 "Dimension %1% for split is not supported by Armnn. %2%")
2284 % splitDim
2285 % CHECK_LOCATION().AsString()));
2286 }
2287
2288 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002289 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002290 {
2291 throw ParseException(
2292 boost::str(
2293 boost::format(
2294 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002295 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002296 % inputTensorInfo.GetNumDimensions()
2297 % MaxNumOfTensorDimensions
2298 % CHECK_LOCATION().AsString()));
2299 }
2300
2301 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2302
2303 // Add current input shape to splitterDimSizes
2304 for (unsigned int i = 0; i < inputDimSize; ++i)
2305 {
2306 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2307 }
2308
2309 if (splitterDimSizes[splitDim] % numSplits != 0)
2310 {
2311 throw ParseException("Number of splits must evenly divide the dimension");
2312 }
2313 splitterDimSizes[splitDim] /= numSplits;
2314
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002315 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002316 for (unsigned int j = 0; j < numSplits; ++j)
2317 {
2318 // Set the size of the views.
2319 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2320 {
2321 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2322 }
2323 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2324 }
2325
2326 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2327 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2328
2329 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002330 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002331
Nina Drozd0324f482019-04-08 10:52:10 +01002332 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2333 {
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01002334 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
2335 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01002336 }
2337
2338 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2339 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2340}
2341
Sadik Armagan58f39192018-09-17 14:14:39 +01002342armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2343 unsigned int outputSlot,
2344 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002345{
2346 ActivationDescriptor activationDesc;
2347 std::string layerName = prevLayer->GetName();
2348
2349 switch(activationType)
2350 {
2351 case tflite::ActivationFunctionType_NONE:
2352 {
2353 // this is a no-op: return previous layer
2354 return prevLayer;
2355 }
2356 case tflite::ActivationFunctionType_RELU:
2357 {
2358 activationDesc.m_Function = ActivationFunction::ReLu;
2359 layerName += ":RELU";
2360 break;
2361 }
2362 case tflite::ActivationFunctionType_RELU6:
2363 {
2364 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2365 activationDesc.m_A = 6.0f;
2366 activationDesc.m_B = 0.0f;
2367 layerName += ":RELU6";
2368 break;
2369 }
2370 case tflite::ActivationFunctionType_TANH:
2371 {
2372 activationDesc.m_Function = ActivationFunction::TanH;
2373 activationDesc.m_A = 1.0f;
2374 activationDesc.m_B = 1.0f;
2375 layerName += ":TANH";
2376 break;
2377 }
2378
2379 // I only put these here as a reminder what others we could support
2380 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2381 case tflite::ActivationFunctionType_SIGN_BIT:
2382 default:
2383 {
2384 throw ParseException(
2385 boost::str(
2386 boost::format("TfLite parser doesn't suppport fused activation: "
2387 "%1%/%2% %3% ") %
2388 activationType %
2389 tflite::EnumNameActivationFunctionType(activationType) %
2390 CHECK_LOCATION().AsString()));
2391
2392 }
2393 }
2394
2395 IConnectableLayer* activationLayer =
2396 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2397
2398 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2399 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2400 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2401 return activationLayer;
2402}
2403
2404TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2405{
2406 if (fileName == nullptr)
2407 {
2408 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2409 CHECK_LOCATION().AsString()));
2410 }
2411 boost::system::error_code errorCode;
2412 boost::filesystem::path pathToFile(fileName);
2413 if (!boost::filesystem::exists(pathToFile, errorCode))
2414 {
2415 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2416 fileName %
2417 errorCode %
2418 CHECK_LOCATION().AsString()));
2419 }
2420 std::ifstream file(fileName, std::ios::binary);
2421 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2422 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2423 fileContent.size());
2424}
2425
2426TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2427{
2428 if (binaryContent == nullptr)
2429 {
2430 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2431 CHECK_LOCATION().AsString()));
2432 }
2433 flatbuffers::Verifier verifier(binaryContent, len);
2434 if (verifier.VerifyBuffer<tflite::Model>() == false)
2435 {
2436 throw ParseException(
2437 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2438 "flatbuffers format. size:%1% %2%") %
2439 len %
2440 CHECK_LOCATION().AsString()));
2441 }
2442 return tflite::UnPackModel(binaryContent);
2443}
2444
2445TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2446 size_t subgraphIndex,
2447 size_t operatorIndex)
2448{
2449 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2450
Derek Lambertiff05cc52019-04-26 13:05:17 +01002451 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2452 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002453
2454 size_t inputCount = operatorPtr->inputs.size();
2455 TensorRawPtrVector result(inputCount);
2456 for (size_t i=0; i<inputCount; ++i)
2457 {
2458 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002459 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002460 }
2461 return result;
2462}
2463
2464TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2465 size_t subgraphIndex,
2466 size_t operatorIndex)
2467{
2468 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2469
Derek Lambertiff05cc52019-04-26 13:05:17 +01002470 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2471 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002472
2473 size_t outputCount = operatorPtr->outputs.size();
2474 TensorRawPtrVector result(outputCount);
2475 for (size_t i=0; i<outputCount; ++i)
2476 {
2477 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2478 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002479 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002480 }
2481 return result;
2482}
2483
2484TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2485 size_t subgraphIndex)
2486{
2487 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002488 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002489
Derek Lambertiff05cc52019-04-26 13:05:17 +01002490 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002491 TensorIdRawPtrVector result(inputCount);
2492 for (size_t i=0; i<inputCount; ++i)
2493 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002494 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002495 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002496 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002497 }
2498 return result;
2499}
2500
2501TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2502 size_t subgraphIndex)
2503{
2504 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002505 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002506
Derek Lambertiff05cc52019-04-26 13:05:17 +01002507 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002508 TensorIdRawPtrVector result(outputCount);
2509 for (size_t i=0; i<outputCount; ++i)
2510 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002511 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2512 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002513 }
2514 return result;
2515}
2516
2517std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2518 size_t subgraphIndex,
2519 size_t operatorIndex)
2520{
2521 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002522 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2523 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002524 return operatorPtr->inputs;
2525}
2526
2527std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2528 size_t subgraphIndex,
2529 size_t operatorIndex)
2530{
2531 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002532 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2533 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002534 return operatorPtr->outputs;
2535}
2536
2537void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2538 size_t operatorIndex,
2539 IConnectableLayer* layer,
2540 const std::vector<unsigned int>& tensorIndexes)
2541{
2542 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2543 BOOST_ASSERT(layer != nullptr);
2544 if (tensorIndexes.size() != layer->GetNumInputSlots())
2545 {
2546 throw ParseException(
2547 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2548 " for subgraph:%3% operator index:%4% %5%") %
2549 tensorIndexes.size() %
2550 layer->GetNumInputSlots() %
2551 subgraphIndex %
2552 operatorIndex %
2553 CHECK_LOCATION().AsString()));
2554 }
2555
2556 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2557 {
2558 unsigned int tensorIndex = tensorIndexes[slotIndex];
2559 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2560 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2561 }
2562}
2563
2564void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2565 size_t operatorIndex,
2566 IConnectableLayer* layer,
2567 const std::vector<unsigned int>& tensorIndexes)
2568{
2569 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2570 BOOST_ASSERT(layer != nullptr);
2571 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2572 {
2573 throw ParseException(
2574 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2575 " for subgraph:%3% operator index:%4% %5%") %
2576 tensorIndexes.size() %
2577 layer->GetNumOutputSlots() %
2578 subgraphIndex %
2579 operatorIndex %
2580 CHECK_LOCATION().AsString()));
2581 }
2582
2583 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2584 {
2585 unsigned int tensorIndex = tensorIndexes[slotIndex];
2586 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2587 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2588 }
2589}
2590
2591void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2592{
2593 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2594
2595 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2596 for (auto const & tensorIdAndPtr : inputs)
2597 {
2598 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2599 IConnectableLayer* layer =
2600 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2601
2602 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2603 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2604
2605 RegisterOutputSlots(subgraphIndex,
2606 VIRTUAL_OPERATOR_ID,
2607 layer,
2608 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2609 }
2610}
2611
2612void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2613{
2614 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2615
2616 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2617 for (auto const & tensorIdAndPtr : outputs)
2618 {
2619 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2620 IConnectableLayer* layer =
2621 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2622
2623 RegisterInputSlots(subgraphIndex,
2624 VIRTUAL_OPERATOR_ID,
2625 layer,
2626 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2627 }
2628}
2629
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002630void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2631{
2632 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2633
Derek Lambertiff05cc52019-04-26 13:05:17 +01002634 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002635 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2636 {
2637 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2638 {
2639 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2640 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2641 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002642 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002643 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2644 auto tensorAndData = CreateConstTensor(tensorPtr,
2645 tensorInfo,
2646 armnn::Optional<armnn::PermutationVector&>());
2647
2648 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2649 IConnectableLayer *layer =
2650 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2651
2652 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2653 RegisterOutputSlots(subgraphIndex,
2654 VIRTUAL_OPERATOR_ID,
2655 layer,
2656 { tensorIndex });
2657
2658 }
2659 }
2660 }
2661}
2662
telsoa01c577f2c2018-08-31 09:22:23 +01002663// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2664TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2665{
2666 CHECK_BUFFER(model, bufferIndex);
2667 return model->buffers[bufferIndex].get();
2668}
2669
Matteo Martincigh747ef822018-12-18 09:26:39 +00002670template<typename T>
2671std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2672TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2673 TfLiteParser::TensorRawPtr tensorPtr,
2674 armnn::TensorInfo& tensorInfo,
2675 armnn::Optional<armnn::PermutationVector&> permutationVector)
2676{
2677 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2678 tensorPtr,
2679 tensorInfo,
2680 permutationVector);
2681 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2682 return std::make_pair(constData.first, std::move(storage));
2683}
2684
telsoa01c577f2c2018-08-31 09:22:23 +01002685std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2686TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002687 armnn::TensorInfo& tensorInfo,
2688 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002689{
2690 CHECK_TENSOR_PTR(tensorPtr);
2691 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2692 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2693
2694 switch (tensorInfo.GetDataType())
2695 {
2696 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002697 return CreateConstTensorAndStoreData<float>(bufferPtr,
2698 tensorPtr,
2699 tensorInfo,
2700 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002701 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002702 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2703 tensorPtr,
2704 tensorInfo,
2705 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002706 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002707 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2708 tensorPtr,
2709 tensorInfo,
2710 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002711 default:
2712 {
2713 std::stringstream errString;
2714 errString << "Unexpected datatype when creating const tensor: "
2715 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2716 << " shape:" << tensorInfo.GetShape()
2717 << CHECK_LOCATION().AsString();
2718 throw ParseException(errString.str());
2719 }
2720 }
2721}
2722
2723BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2724 const std::string& name) const
2725{
2726 CHECK_SUBGRAPH(m_Model, subgraphId);
2727 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2728 for (auto const & input : inputs)
2729 {
2730 if (input.second->name == name)
2731 {
2732 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2733 return std::make_pair(bindingId, ToTensorInfo(input.second));
2734 }
2735 }
2736
2737 std::stringstream bindings;
2738 for (auto const & input : inputs)
2739 {
2740 bindings << "'" << input.second->name << "' ";
2741 }
2742
2743 throw ParseException(
2744 boost::str(
2745 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2746 "Possible inputs are: [%3%] %4%") %
2747 subgraphId %
2748 name %
2749 bindings.str() %
2750 CHECK_LOCATION().AsString()));
2751}
2752
2753BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2754 const std::string& name) const
2755{
2756 CHECK_SUBGRAPH(m_Model, subgraphId);
2757 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002758 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002759 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002760 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002761 if (output.second->name == name)
2762 {
2763 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002764 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2765 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2766 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002767 }
2768 }
2769
2770 std::stringstream bindings;
2771 for (auto const & output : outputs)
2772 {
2773 bindings << "'" << output.second->name << "' ";
2774 }
2775
2776 throw ParseException(
2777 boost::str(
2778 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2779 "Possible outputs are: [%3%] %4%") %
2780 subgraphId %
2781 name %
2782 bindings.str() %
2783 CHECK_LOCATION().AsString()));
2784}
2785
2786size_t TfLiteParser::GetSubgraphCount() const
2787{
2788 return m_Model->subgraphs.size();
2789}
2790
2791std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2792{
2793 CHECK_SUBGRAPH(m_Model, subgraphId);
2794 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2795 std::vector<std::string> result;
2796 result.reserve(inputs.size());
2797 for (auto const & input : inputs)
2798 {
2799 result.push_back(input.second->name);
2800 }
2801 return result;
2802}
2803
2804std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2805{
2806 CHECK_SUBGRAPH(m_Model, subgraphId);
2807 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2808 std::vector<std::string> result;
2809 result.reserve(outputs.size());
2810 for (auto const & output : outputs)
2811 {
2812 result.push_back(output.second->name);
2813 }
2814 return result;
2815}
2816
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01002817ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01002818{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01002819 return new TfLiteParser(options);
telsoa01c577f2c2018-08-31 09:22:23 +01002820}
2821
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01002822ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01002823{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01002824 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
telsoa01c577f2c2018-08-31 09:22:23 +01002825}
2826
2827void ITfLiteParser::Destroy(ITfLiteParser* parser)
2828{
2829 delete parser;
2830}
2831
2832TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
2833: m_FloatData(std::move(data))
2834, m_Uint8Data(nullptr)
2835, m_Int32Data(nullptr)
2836{
2837}
2838
2839TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2840: m_FloatData(nullptr)
2841, m_Uint8Data(std::move(data))
2842, m_Int32Data(nullptr)
2843{
2844}
2845
2846TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2847: m_FloatData(nullptr)
2848, m_Uint8Data(nullptr)
2849, m_Int32Data(std::move(data))
2850{
2851}
2852
2853} // armnnTfLiteParser