blob: 9a207409148f5ca5ddb34257d725b50df0b4476d [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5#include "TfLiteParser.hpp"
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Exceptions.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <boost/filesystem.hpp>
11
12// armnnUtils:
Sadik Armagan479045b2018-10-01 11:51:37 +010013#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010014#include <Permute.hpp>
15#include <VerificationHelpers.hpp>
16
17// The generated code based on the Tf Lite schema:
18#include <schema_generated.h>
19
20#include <boost/core/ignore_unused.hpp>
21#include <boost/assert.hpp>
22#include <boost/format.hpp>
23#include <boost/log/trivial.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010024#include <boost/format.hpp>
25#include <boost/numeric/conversion/cast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026
27#include <fstream>
28#include <algorithm>
29#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010030#include <numeric>
keidav011b3e2ea2019-02-21 10:07:37 +000031#include <flatbuffers/flexbuffers.h>
telsoa01c577f2c2018-08-31 09:22:23 +010032
33using namespace armnn;
34using armnn::CheckLocation;
35namespace armnnTfLiteParser
36{
37namespace
38{
jimfly01c25411c2018-11-14 17:47:22 +000039
telsoa01c577f2c2018-08-31 09:22:23 +010040const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
41
42void CheckSubgraph(const TfLiteParser::ModelPtr & model,
43 size_t subgraphIndex,
44 const CheckLocation & location)
45{
46 if (model.get() == nullptr)
47 {
48 throw ParseException(
49 boost::str(
50 boost::format("%1% was called with invalid (null) model. "
51 "Possible reason is that the model is not yet loaded and Unpack(ed). "
52 "subgraph:%2% at %3%") %
53 location.m_Function %
54 subgraphIndex %
55 location.FileLine()));
56 }
57 else if (subgraphIndex >= model->subgraphs.size())
58 {
59 throw ParseException(
60 boost::str(
61 boost::format("%1% was called with an invalid subgraph index. "
62 "subgraph:%2% at %3%") %
63 location.m_Function %
64 subgraphIndex %
65 location.FileLine()));
66 }
67}
68
69#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
70 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
71
72void CheckModel(const TfLiteParser::ModelPtr & model,
73 size_t subgraphIndex,
74 size_t operatorIndex,
75 const CheckLocation & location)
76{
77 if (model.get() == nullptr)
78 {
79 throw ParseException(
80 boost::str(
81 boost::format("%1% was called with invalid (null) model. "
82 "Possible reason is that the model is not yet loaded and Unpack(ed). "
83 "subgraph:%2% operator:%3% at %4%") %
84 location.m_Function %
85 subgraphIndex %
86 operatorIndex %
87 location.FileLine()));
88 }
89 else if (subgraphIndex >= model->subgraphs.size())
90 {
91 throw ParseException(
92 boost::str(
93 boost::format("%1% was called with an invalid subgraph index. "
94 "subgraph:%2% operator:%3% at %4%") %
95 location.m_Function %
96 subgraphIndex %
97 operatorIndex %
98 location.FileLine()));
99 }
100 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
101 operatorIndex != VIRTUAL_OPERATOR_ID)
102 {
103 throw ParseException(
104 boost::str(
105 boost::format("%1% was called with an invalid operator index. "
106 "subgraph:%2% operator:%3% at %4%") %
107 location.m_Function %
108 subgraphIndex %
109 operatorIndex %
110 location.FileLine()));
111 }
112}
113
114#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
115 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
116
117void CheckTensor(const TfLiteParser::ModelPtr & model,
118 size_t subgraphIndex,
119 size_t tensorIndex,
120 const CheckLocation & location)
121{
122 // not checking model, because I assume CHECK_MODEL already run
123 // and checked that. An assert would do.
124 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
125
126 // also subgraph index should be checked by CHECK_MODEL so
127 // I only add an assert here
128 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
129
130 // the tensor index is the only one to check here
131 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
132 {
133 throw ParseException(
134 boost::str(
135 boost::format("%1% was called with an invalid tensor index. "
136 "subgraph:%2% tensor:%3% at %4%") %
137 location.m_Function %
138 subgraphIndex %
139 tensorIndex %
140 location.FileLine()));
141 }
142}
143
144#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
145 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
146
147void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
148 const CheckLocation & location)
149{
150 if (rawPtr == nullptr)
151 {
152 throw ParseException(
153 boost::str(
154 boost::format("%1% was called with a null tensor pointer. "
155 "at %2%") %
156 location.m_Function %
157 location.FileLine()));
158
159 }
160}
161
162#define CHECK_TENSOR_PTR(TENSOR_PTR) \
163 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
164
165void CheckBuffer(const TfLiteParser::ModelPtr & model,
166 size_t bufferIndex,
167 const CheckLocation & location)
168{
169 if (model.get() == nullptr)
170 {
171 throw ParseException(
172 boost::str(
173 boost::format("%1% was called with invalid (null) model. "
174 "Possible reason is that the model is not yet loaded and Unpack(ed). "
175 "buffer:%2% at %3%") %
176 location.m_Function %
177 bufferIndex %
178 location.FileLine()));
179 }
180 else if (bufferIndex >= model->buffers.size())
181 {
182 throw ParseException(
183 boost::str(
184 boost::format("%1% was called with an invalid buffer index. "
185 "buffer index:%2% at %3%") %
186 location.m_Function %
187 bufferIndex %
188 location.FileLine()));
189 }
190 else if (model->buffers[bufferIndex].get() == nullptr)
191 {
192 throw ParseException(
193 boost::str(
194 boost::format("The buffer #%1% is null. %3%") %
195 bufferIndex %
196 location.AsString()));
197 }
198}
199
200#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
201 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
202
203void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
204 const armnn::TensorInfo & tensorInfo,
205 uint32_t bufferId,
206 const CheckLocation & location)
207{
208 if (bufferPtr == nullptr)
209 {
210 throw ParseException(
211 boost::str(
212 boost::format("BufferPtr is null for buffer:%1%. %2%") %
213 bufferId %
214 location.AsString()));
215 }
216 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
217 tensorInfo.GetNumBytes() > bufferPtr->data.size())
218 {
219 std::stringstream ss;
220 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
221 << "For tensor: " << tensorInfo.GetShape()
222 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
223 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
224 throw ParseException(ss.str());
225 }
226}
227
228#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
229 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
230
231bool IsActivationSupported(tflite::ActivationFunctionType activationType)
232{
233 switch(activationType)
234 {
235 case tflite::ActivationFunctionType_NONE:
236 case tflite::ActivationFunctionType_RELU:
237 case tflite::ActivationFunctionType_RELU6:
238 case tflite::ActivationFunctionType_TANH:
239 {
240 return true;
241 }
242 default:
243 {
244 return false;
245 }
246 }
247}
248
// Throws ParseException when OPTION carries a fused activation the parser
// cannot handle. Kept as a macro so the reported __func__ is the caller's.
// FIX: corrected the "suppport" typo in the user-facing error message.
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
266
267
268std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
269{
270 std::vector<unsigned int> result;
271 result.reserve(in.size());
272 for (auto & i : in)
273 {
274 result.push_back(CHECKED_NON_NEGATIVE(i));
275 }
276 return result;
277}
278
279void CalcPadding(uint32_t inputSize,
280 uint32_t filterSize,
281 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100282 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100283 uint32_t& paddingFront,
284 uint32_t& paddingBack,
285 tflite::Padding padding)
286{
287 paddingFront = 0;
288 paddingBack = 0;
289 if (padding == tflite::Padding_SAME)
290 {
291 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100292 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
293 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100294 if (temp > inputSize)
295 {
296 paddingFront = (temp - inputSize) / 2;
297 paddingBack = (temp - inputSize) - paddingFront;
298 }
299 }
300}
301
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000302armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
telsoa01c577f2c2018-08-31 09:22:23 +0100303{
304 armnn::DataType type;
305 CHECK_TENSOR_PTR(tensorPtr);
306
307 switch (tensorPtr->type)
308 {
309 case tflite::TensorType_UINT8:
310 type = armnn::DataType::QuantisedAsymm8;
311 break;
312 case tflite::TensorType_FLOAT32:
313 type = armnn::DataType::Float32;
314 break;
315 case tflite::TensorType_INT32:
316 type = armnn::DataType::Signed32;
317 break;
318
319 default:
320 {
321 CheckLocation location = CHECK_LOCATION();
322 throw ParseException(
323 boost::str(
324 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
325 tensorPtr->type %
326 tflite::EnumNameTensorType(tensorPtr->type) %
327 tensorPtr->name %
328 location.AsString()));
329 }
330 }
331
332 float quantizationScale = 0.0f;
333 int32_t quantizationOffset = 0;
334
335 if (tensorPtr->quantization.get())
336 {
337 CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
338 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
339
340 if (tensorPtr->quantization->scale.size() == 1)
341 {
342 quantizationScale = tensorPtr->quantization->scale[0];
343 }
344 if (tensorPtr->quantization->zero_point.size() == 1)
345 {
346 // NOTE: we lose precision here when converting from 64 bit to 32
347 // but this is what we support at the monent in ArmNN
348 quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
349 }
350 }
351
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100352 std::vector<unsigned int> safeShape = shapes;
353 if (safeShape.size() == 0)
354 {
355 safeShape.push_back(1);
356 }
357
telsoa01c577f2c2018-08-31 09:22:23 +0100358 // two statements (on purpose) for easier debugging:
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100359 armnn::TensorInfo result(static_cast<unsigned int>(safeShape.size()),
360 safeShape.data(),
telsoa01c577f2c2018-08-31 09:22:23 +0100361 type,
362 quantizationScale,
363 quantizationOffset);
364 return result;
365}
366
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000367armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
368{
369 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
370 return ToTensorInfo(tensorPtr, dimensions);
371}
372
telsoa01c577f2c2018-08-31 09:22:23 +0100373template<typename T>
374std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
375CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
376 TfLiteParser::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000377 armnn::TensorInfo& tensorInfo,
378 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100379{
380 BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
381 BOOST_ASSERT_MSG(bufferPtr != nullptr,
382 boost::str(
383 boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
384
385 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000386
387 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
388 {
389 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000390 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
391 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000392 }
393 else
394 {
395 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
396 }
397
telsoa01c577f2c2018-08-31 09:22:23 +0100398 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
399}
400
telsoa01c577f2c2018-08-31 09:22:23 +0100401armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
402{
403 // generate the binding id by shifting the tensor id by 8 bit
404 // and add the subgraph id, which allows 256 subgraphs
405 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
406}
407
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000408bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
409{
410 const unsigned int actualSize = actual.GetNumDimensions();
411 if (actualSize != expected.size())
412 {
413 return false;
414 }
415
416 for (unsigned int i = 0u; i < actualSize; i++)
417 {
418 if (expected[i] < 0 ||
419 actual[i] != static_cast<unsigned int>(expected[i]))
420 {
421 return false;
422 }
423 }
424
425 return true;
426}
427
telsoa01c577f2c2018-08-31 09:22:23 +0100428} // <anonymous>
429
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100430TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
431: m_Options(options)
432, m_Network(nullptr, nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +0100433, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
434{
435 // register supported operators
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100436 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
437 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
438 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
439 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
440 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
441 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
442 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
443 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
444 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
445 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
446 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
447 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
448 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
449 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
450 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
451 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
452 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
453 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
454 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
455 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
456 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
457 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
458 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
459 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
460 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
461 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
josh minorba424d22019-11-13 10:55:17 -0600462 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100463 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
464 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
465 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
466 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
467 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
468
469 // register supported custom operators
470 m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
telsoa01c577f2c2018-08-31 09:22:23 +0100471}
472
473void TfLiteParser::ResetParser()
474{
475 m_Network = armnn::INetworkPtr(nullptr, nullptr);
476 m_Model = nullptr;
477 m_SubgraphConnections.clear();
478}
479
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200480void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
481 size_t operatorIndex,
482 IConnectableLayer *layer)
483{
484 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
485 BOOST_ASSERT(layer != nullptr);
486
Derek Lambertiff05cc52019-04-26 13:05:17 +0100487 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
488 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200489
490 BOOST_ASSERT(operatorPtr->inputs.size() > 1);
491
492 uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
Derek Lambertiff05cc52019-04-26 13:05:17 +0100493 TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200494 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
Derek Lambertiff05cc52019-04-26 13:05:17 +0100495 TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200496
497 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
498 armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);
499
500 if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
501 {
502 uint32_t id = reshapedInputId;
503 reshapedInputId = inputId;
504 inputId = id;
505
506 reshapedTensorInfo = ToTensorInfo(tensorPtr1);
507 inputTensorInfo = ToTensorInfo(tensorPtr);
508 }
509
510 uint32_t numDimensions = inputTensorInfo.GetNumDimensions();
511
512 std::vector<unsigned> reshapedDim;
513 for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
514 {
515 reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
516 }
517
518 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
519 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
520
521 reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
522
523 std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
524 armnn::ReshapeDescriptor desc;
525 desc.m_TargetShape = reshapedTensorInfo.GetShape();
526 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
527
528 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
529 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
530
531 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
532
533 armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
534 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
535}
536
telsoa01c577f2c2018-08-31 09:22:23 +0100537INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
538{
539 ResetParser();
540 m_Model = LoadModelFromFile(graphFile);
541 return CreateNetworkFromModel();
542}
543
544INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
545{
546 ResetParser();
547 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
548 return CreateNetworkFromModel();
549}
550
551INetworkPtr TfLiteParser::CreateNetworkFromModel()
552{
553 m_Network = INetwork::Create();
554 BOOST_ASSERT(m_Model.get() != nullptr);
555
556 bool failedToCreate = false;
557 std::stringstream errors;
558
559 if (m_Model->subgraphs.size() != 1)
560 {
561 throw ParseException(
562 boost::str(
563 boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
564 m_Model->subgraphs.size() %
565 CHECK_LOCATION().AsString()));
566 }
567
568 size_t subgraphIndex = 0;
Derek Lambertiff05cc52019-04-26 13:05:17 +0100569 for (SubgraphPtr const & subgraph : m_Model->subgraphs)
telsoa01c577f2c2018-08-31 09:22:23 +0100570 {
571 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
572
573 size_t operatorIndex = 0;
574 for (OperatorPtr const & op : subgraph->operators)
575 {
576 try
577 {
telsoa01c577f2c2018-08-31 09:22:23 +0100578 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
579 auto builtinCode = opCodePtr->builtin_code;
580
581 if (builtinCode > tflite::BuiltinOperator_MAX)
582 {
583 throw ParseException(
584 boost::str(
585 boost::format("Operator code %1% is out of range 0-%2%. "
586 "subgraph:%3% operator idx:%4%. %5%") %
587 builtinCode %
588 tflite::BuiltinOperator_MAX %
589 subgraphIndex %
590 operatorIndex %
591 CHECK_LOCATION().AsString()));
592 }
593
594 // lookup and call the parser function
595 auto & parserFunction = m_ParserFunctions[builtinCode];
596 (this->*parserFunction)(subgraphIndex, operatorIndex);
597 }
598 catch (const ParseException& e)
599 {
600 failedToCreate = true;
601 std::stringstream errorString;
602
603 errorString << "Failed to parse operator #" << operatorIndex
604 << " within subgraph #" << subgraphIndex
605 << " error: " << e.what();
606 BOOST_LOG_TRIVIAL(error) << errorString.str();
607
608 errors << errorString.str() << "\n";
609 }
610 ++operatorIndex;
611 }
612
613 SetupInputLayers(subgraphIndex);
614 SetupOutputLayers(subgraphIndex);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -0200615 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100616
617 ++subgraphIndex;
618 }
619
620 if (failedToCreate)
621 {
622 // we can skip everything and let the outer exception handler deal with the error
623 throw ParseException(errors.str());
624 }
625
626 // establish the connections from the layer outputs to the inputs of the subsequent layers
627 for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
628 {
629 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
630 {
631 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
632 {
633 for (size_t inputSlotIdx = 0;
634 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
635 ++inputSlotIdx)
636 {
637 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
638 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
639 }
640 }
641 }
642 }
643
644 return std::move(m_Network);
645}
646
647void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
648 size_t tensorIndex,
649 armnn::IOutputSlot* slot)
650{
651 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
652 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
653 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
654
655 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
656
657 // assuming there is only one producer for that tensor
658 if (tensorSlots.outputSlot != nullptr)
659 {
660 throw ParseException(boost::str(
661 boost::format("Another layer has already registered itself as the producer of "
662 "subgraph:%1% tensor:%2% %3%") %
663 subgraphIndex %
664 tensorIndex %
665 CHECK_LOCATION().AsString()));
666 }
667
668 tensorSlots.outputSlot = slot;
669}
670
671void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
672 size_t tensorIndex,
673 armnn::IInputSlot* slot)
674{
675 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
676 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
677 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
678
679 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
680 tensorSlots.inputSlots.push_back(slot);
681}
682
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100683void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
684{
685 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
686
687 // NOTE: By default we presume the custom operator is not supported
688 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
689
690 // Identify custom code defined for custom operator
691 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
692 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
693
694 // Find parser function that correspondes to custom code (if any)
695 auto iterator = m_CustomParserFunctions.find(customCode);
696 if (iterator != m_CustomParserFunctions.end())
697 {
698 customParserFunction = iterator->second;
699 }
700
701 // Run parser function
702 (this->*customParserFunction)(subgraphIndex, operatorIndex);
703}
704
telsoa01c577f2c2018-08-31 09:22:23 +0100705void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
706{
707 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100708
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100709 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
710
711 auto opcodeIndex = operatorPtr->opcode_index;
712 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
713
714 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
715 {
716 // Do not add StandInLayer, throw ParseException instead
717 throw ParseException(
718 boost::str(
719 boost::format("Operator not supported. "
720 "subgraph:%1% operator:%2% "
721 "opcode_index:%3% opcode:%4% / %5% %6%") %
722 subgraphIndex %
723 operatorIndex %
724 opcodeIndex %
725 opcode %
726 tflite::EnumNameBuiltinOperator(opcode) %
727 CHECK_LOCATION().AsString()));
728 }
729
730 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
731 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
732
733 const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
734 const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());
735
736 StandInDescriptor descriptor(numInputs, numOutputs);
737 auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
738
739 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
740 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
741 for (unsigned int i = 0u; i < numOutputs; ++i)
742 {
743 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
744 }
745
746 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
747 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
748
749 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
750 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +0100751}
752
telsoa01c577f2c2018-08-31 09:22:23 +0100753void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
754{
755 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
756
757 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
758 const auto * options = operatorPtr->builtin_options.AsConv2DOptions();
759
760 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
761
762 Convolution2dDescriptor desc;
763 desc.m_BiasEnabled = false;
764 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
765 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000766 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100767 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
768 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000769
telsoa01c577f2c2018-08-31 09:22:23 +0100770 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
771 CHECK_VALID_SIZE(inputs.size(), 2, 3);
772
773 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
774 CHECK_VALID_SIZE(outputs.size(), 1);
775
776 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
777 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
778
779 // assuming input is NHWC
780 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
781 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
782
783 // assuming the filter is OHWI : Output, H, W, Input
784 // which is essentially the same as NHWC
785 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
786 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
787
Pablo Tellof0bd6832019-04-26 17:58:13 +0100788 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
789 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
790 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
791 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100792
Matteo Martincigh747ef822018-12-18 09:26:39 +0000793 auto filterTensorAndData = CreateConstTensor(inputs[1],
794 filterTensorInfo,
795 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100796 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100797
798 auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
799
800 if (inputs.size() == 3)
801 {
802 desc.m_BiasEnabled = true;
803 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000804 auto biasTensorAndData = CreateConstTensor(inputs[2],
805 biasTensorInfo,
806 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100807 layer = m_Network->AddConvolution2dLayer(desc,
808 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100809 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100810 layerName.c_str());
811 }
812 else
813 {
814 layer = m_Network->AddConvolution2dLayer(desc,
815 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100816 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100817 layerName.c_str());
818 }
819
820 BOOST_ASSERT(layer != nullptr);
821
telsoa01c577f2c2018-08-31 09:22:23 +0100822 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000823 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100824
825 // register the input connection slots for the layer, connections are made after all layers have been created
826 // only the tensors for the inputs are relevant, exclude the const tensors
827 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000828 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100829
jimfly01c25411c2018-11-14 17:47:22 +0000830 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100831 // register the output connection slots for the layer, connections are made after all layers have been created
832 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
833 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
834}
835
// Converts a TfLite DEPTHWISE_CONV_2D operator into an ArmNN DepthwiseConvolution2d
// layer (plus an optional fused activation layer). The TfLite weight tensor is
// reshaped and permuted into the layout ArmNN expects before being baked into the
// layer as a constant tensor.
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;        // switched on below if a bias input is present
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // depth_multiplier is validated but not stored: it is implied by the weight shape
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    // 2 inputs = data + weights, 3 inputs = data + weights + bias
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ] - the channel count I is taken from the input
    // tensor, and M (the depth multiplier) is recovered as (I * M) / I
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // compute padding from the TfLite padding field, taking dilation into account
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        // bias is used as-is, no permutation required
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // any fused activation becomes a separate layer; outputs are registered against it
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
922
Keith Davis4cd29a02019-09-09 14:49:20 +0100923void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
924{
925 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
926
927 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +0100928 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +0100929
930 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
931 CHECK_VALID_SIZE(outputs.size(), 1);
932
933 armnn::IConnectableLayer* layer = nullptr;
934 auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);
935
936 PermuteDescriptor desc;
937
josh minorba424d22019-11-13 10:55:17 -0600938 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +0100939 {
940 armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
941 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -0600942 auto numPermVecElements = permuteTensorInfo.GetNumElements();
943 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +0100944 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
945
josh minorba424d22019-11-13 10:55:17 -0600946 // permuteShape assumes Tf/Np permute vectors, we must translate to armnn expected form
947 // to do so we find the perm vector which would invert what a tf perm vector would do (ex 3,0,1,2 -> 1,2,3,0)
948 std::vector<unsigned int> armnnPermuteShape(numPermVecElements);
949 std::vector<unsigned int>::iterator it;
950 for (unsigned int i = 0u; i < numPermVecElements; ++i)
951 {
952 it = std::find(permuteShape.begin(), permuteShape.end(), i);
953 armnnPermuteShape[i] = static_cast<unsigned int>(std::distance(permuteShape.begin(), it));
954 }
Kevin May85d92602019-09-27 17:21:06 +0100955
josh minorba424d22019-11-13 10:55:17 -0600956 PermutationVector permutationVector(armnnPermuteShape.data(), permuteTensorInfo.GetNumElements());
957
958 desc = PermuteDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +0100959 }
960
Keith Davis4cd29a02019-09-09 14:49:20 +0100961 layer = m_Network->AddPermuteLayer(desc, layerName.c_str());
962
963 BOOST_ASSERT(layer != nullptr);
964
965 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
966 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
967
968 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
969 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
970
971 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
972 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
973}
974
// Converts a TfLite TRANSPOSE_CONV operator into an ArmNN TransposeConvolution2d layer.
// Of the three TfLite inputs only inputs[1] (weights) and inputs[2] (the data tensor)
// are consumed here; inputs[0] is not referenced.
// NOTE(review): there is no bias path - m_BiasEnabled stays false and EmptyOptional()
// is always passed for the bias.
void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // inputs[2] is the data tensor, inputs[1] holds the weights
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // derive the padding from the TfLite padding field; dilation is fixed to 1
    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    // the weights are baked into the layer as a constant tensor; no permutation is applied
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);

    layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                      filterTensorAndData.first,
                                                      EmptyOptional(),
                                                      layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1044
// AVERAGE_POOL_2D is handled by the shared pooling parser below.
void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1049
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001050void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
1051{
1052 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1053
1054 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1055 CHECK_VALID_SIZE(inputs.size(), 3);
1056
1057 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1058 CHECK_VALID_SIZE(outputs.size(), 1);
1059
1060 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1061 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1062
1063 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1064 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1065
1066 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1067 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1068
1069 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1070 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1071
1072 size_t step = 2;
1073 std::vector<std::pair<unsigned int, unsigned int>> crops;
1074 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1075 {
1076 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1077 }
1078
1079 armnn::BatchToSpaceNdDescriptor desc;
1080 desc.m_BlockShape = blockShape;
1081 desc.m_Crops = crops;
1082 desc.m_DataLayout = armnn::DataLayout::NHWC;
1083
1084 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1085
1086 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
1087 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1088
1089 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1090
1091 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1092 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1093
1094 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1095 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1096}
1097
Matthew Jackson28c94572019-07-18 10:47:03 +01001098void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1099{
1100 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1101
1102 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1103 CHECK_VALID_SIZE(inputs.size(), 1);
1104
1105 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1106 CHECK_VALID_SIZE(outputs.size(), 1);
1107
1108 L2NormalizationDescriptor desc;
1109 desc.m_DataLayout = armnn::DataLayout::NHWC;
1110 auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1111 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1112
1113 BOOST_ASSERT(layer != nullptr);
1114
1115 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1116 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1117
1118 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1119 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1120
1121 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1122 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1123}
1124
// MAX_POOL_2D is handled by the shared pooling parser below.
void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1129
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001130void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1131{
1132 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1133
1134 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1135 CHECK_VALID_SIZE(inputs.size(), 2);
1136
1137 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1138 CHECK_VALID_SIZE(outputs.size(), 1);
1139
1140 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1141 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1142
1143 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1144 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1145
1146 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1147 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1148
1149 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1150 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1151 {
1152 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1153 }
1154 else
1155 {
1156 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1157 }
1158
1159 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1160 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1161}
1162
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001163void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1164{
1165 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1166
1167 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1168 CHECK_VALID_SIZE(inputs.size(), 2);
1169
1170 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1171 CHECK_VALID_SIZE(outputs.size(), 1);
1172
1173 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1174 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1175
1176 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1177 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1178
1179 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1180 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1181
1182 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1183 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1184 {
1185 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1186 }
1187 else
1188 {
1189 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1190 }
1191
1192 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1193 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1194}
1195
// Shared handler for AVERAGE_POOL_2D and MAX_POOL_2D. Builds a Pooling2d layer from
// the TfLite Pool2DOptions, then appends any fused activation as a separate layer.
void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    // pick a layer name that reflects which TfLite operator we came from
    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // derive padding from the TfLite padding field; pooling has no dilation (pass 1)
    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // any fused activation becomes a separate layer; outputs are registered against it
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1267
josh minorba424d22019-11-13 10:55:17 -06001268void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
1269{
1270 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1271
1272 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1273 CHECK_VALID_SIZE(inputs.size(), 3);
1274 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1275 CHECK_VALID_SIZE(outputs.size(), 1);
1276
1277 SliceDescriptor desc;
1278
1279 // set begin tensor info for slice descriptor
1280 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1281 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1282
1283 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1284 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1285
1286 // set size tensor info for slice descriptor
1287 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1288 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1289
1290 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1291 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1292 desc = SliceDescriptor(begin, size);
1293
1294 auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
1295 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
1296
1297 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1298 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1299
1300 // register the input connection slots for the layer, connections are made after all layers have been created
1301 // only the tensors for the inputs are relevant, exclude the const tensors
1302 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1303 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1304
1305 // register the output connection slots for the layer, connections are made after all layers have been created
1306 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1307 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1308}
1309
telsoa01c577f2c2018-08-31 09:22:23 +01001310void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1311{
1312 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1313 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1314 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1315
1316 SoftmaxDescriptor desc;
1317 desc.m_Beta = options->beta;
1318
1319 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1320 CHECK_VALID_SIZE(inputs.size(), 1);
1321 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1322 CHECK_VALID_SIZE(outputs.size(), 1);
1323
1324 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1325 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1326
1327 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1328 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1329
1330 // register the input connection slots for the layer, connections are made after all layers have been created
1331 // only the tensors for the inputs are relevant, exclude the const tensors
1332 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1333 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1334
1335 // register the output connection slots for the layer, connections are made after all layers have been created
1336 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1337 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1338}
1339
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001340void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1341{
1342 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1343
1344 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1345 CHECK_VALID_SIZE(inputs.size(), 3);
1346
1347 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1348 CHECK_VALID_SIZE(outputs.size(), 1);
1349
1350 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1351 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1352
1353 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1354 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1355
1356 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1357 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1358
1359 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1360 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1361
1362 size_t step = 2;
1363 std::vector<std::pair<unsigned int, unsigned int>> padList;
1364 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1365 {
1366 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1367 }
1368
1369 armnn::SpaceToBatchNdDescriptor desc;
1370 desc.m_BlockShape = blockShape;
1371 desc.m_PadList = padList;
1372 desc.m_DataLayout = armnn::DataLayout::NHWC;
1373
1374 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1375
1376 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1377 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1378
1379 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1380
1381 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1382 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1383
1384 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1385 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1386}
1387
telsoa01c577f2c2018-08-31 09:22:23 +01001388armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1389 const armnn::TensorInfo & inputTensorInfo)
1390{
1391 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1392 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1393 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1394
1395 if (inputTensorInfo.GetNumDimensions() > 4)
1396 {
1397 std::stringstream ss;
1398 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1399 << " shape:" << inputTensorInfo.GetShape() << " "
1400 << CHECK_LOCATION().AsString();
1401 throw ParseException(ss.str());
1402 }
1403
1404 if (squeezeDims.empty())
1405 {
1406 squeezeDims.assign(dimensionSequence,
1407 dimensionSequence+inputTensorInfo.GetNumDimensions());
1408 }
1409
1410 std::vector<uint32_t> outputDims;
1411 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1412 {
1413 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1414 auto currentDimension = inputTensorInfo.GetShape()[i];
1415 if (skipSqueeze || currentDimension != 1)
1416 {
1417 outputDims.push_back(currentDimension);
1418 }
1419 }
1420
1421 if (outputDims.size() > 4)
1422 {
1423 std::stringstream ss;
1424 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1425 << " shape:" << inputTensorInfo.GetShape() << " "
1426 << CHECK_LOCATION().AsString();
1427 throw ParseException(ss.str());
1428 }
1429
1430 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1431 outputDims.data());
1432
1433 // we need to preserve the tensor type and the quantization data as well
1434 TensorInfo outTensorInfo = inputTensorInfo;
1435 outTensorInfo.SetShape(outShape);
1436
1437 return outTensorInfo;
1438}
1439
1440void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1441{
1442 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1443
1444 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1445 CHECK_VALID_SIZE(inputs.size(), 1);
1446
1447 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1448 CHECK_VALID_SIZE(outputs.size(), 1);
1449
1450 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1451 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1452
1453 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1454 armnn::TensorInfo outputTensorInfo =
1455 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1456 inputTensorInfo);
1457
1458 ReshapeDescriptor reshapeDesc;
1459 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1460
1461 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1462 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1463 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1464
1465 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1466 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1467
1468 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1469 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1470}
1471
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001472void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1473{
1474 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1475
1476 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1477 CHECK_VALID_SIZE(inputs.size(), 4);
1478
1479 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1480 CHECK_VALID_SIZE(outputs.size(), 1);
1481
1482 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1483 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1484
1485 StridedSliceDescriptor desc;
1486 desc.m_BeginMask = options->begin_mask;
1487 desc.m_EllipsisMask = options->ellipsis_mask;
1488 desc.m_EndMask = options->end_mask;
1489 desc.m_NewAxisMask = options->new_axis_mask;
1490 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1491 desc.m_DataLayout = armnn::DataLayout::NHWC;
1492
1493 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1494 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1495
1496 std::vector<int> begin(beginTensorInfo.GetNumElements());
1497 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1498
1499 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1500 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1501
1502 std::vector<int> end(endTensorInfo.GetNumElements());
1503 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1504
1505 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1506 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1507
1508 std::vector<int> stride(strideTensorInfo.GetNumElements());
1509 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1510
1511 desc.m_Begin = begin;
1512 desc.m_End = end;
1513 desc.m_Stride = stride;
1514
1515 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1516 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1517
1518 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1519 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1520
1521 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1522 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1523
1524 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1525 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1526}
1527
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001528void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1529{
1530 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1531
1532 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1533 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1534
1535 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1536 CHECK_VALID_SIZE(inputs.size(), 2);
1537
1538 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1539 CHECK_VALID_SIZE(outputs.size(), 1);
1540
1541 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1542 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1543
1544 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1545 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1546
1547 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1548 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1549
1550 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1551 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1552 {
1553 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1554 }
1555 else
1556 {
1557 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1558 }
1559
1560 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1561
1562 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1563 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1564}
1565
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001566void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1567{
1568 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1569
1570 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1571 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1572
1573 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1574 CHECK_VALID_SIZE(inputs.size(), 2);
1575
1576 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1577 CHECK_VALID_SIZE(outputs.size(), 1);
1578
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001579 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1580 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1581
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001582 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1583 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1584
1585 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1586 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1587
1588 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001589 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1590 {
1591 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1592 }
1593 else
1594 {
1595 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1596 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001597
1598 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1599
1600 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1601 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1602}
1603
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001604void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1605{
1606 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1607
1608 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1609 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1610
1611 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1612 CHECK_VALID_SIZE(inputs.size(), 2);
1613
1614 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1615 CHECK_VALID_SIZE(outputs.size(), 1);
1616
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001617 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1618 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1619
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001620 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1621 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1622
1623 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1624 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1625
1626 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001627 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1628 {
1629 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1630 }
1631 else
1632 {
1633 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1634 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001635
1636 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1637
1638 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1639 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1640}
1641
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001642void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1643{
1644 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1645
1646 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1647
1648 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1649 CHECK_VALID_SIZE(outputs.size(), 1);
1650
1651 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1652 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1653
1654 armnn::MeanDescriptor desc;
1655 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1656 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1657 desc.m_Axis = axis;
1658
1659 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1660 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1661
1662 desc.m_KeepDims =
1663 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1664 true : false;
1665
1666 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1667 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1668
1669 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1670
1671 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1672 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1673
1674 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1675 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1676}
1677
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001678void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1679{
1680 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1681
1682 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1683
1684 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1685 CHECK_VALID_SIZE(outputs.size(), 1);
1686
1687 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1688 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1689
1690 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1691 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1692
1693 size_t step = 2;
1694 armnn::PadDescriptor desc;
1695 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1696 {
1697 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1698 }
1699
1700 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1701 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1702
1703 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1704 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1705
1706 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1707 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1708
1709 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1710 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1711}
1712
Finn Williamsc42c3842019-01-22 14:18:11 +00001713
Sadik Armagan58f39192018-09-17 14:14:39 +01001714void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1715{
Finn Williamsc42c3842019-01-22 14:18:11 +00001716 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001717}
1718
1719void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1720{
Finn Williamsc42c3842019-01-22 14:18:11 +00001721 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1722}
Sadik Armagan58f39192018-09-17 14:14:39 +01001723
Finn Williamsc42c3842019-01-22 14:18:11 +00001724void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1725{
1726 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1727}
1728
Nina Drozd99851762019-04-09 09:37:38 +01001729void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1730{
1731 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1732}
1733
Finn Williamsc42c3842019-01-22 14:18:11 +00001734
1735void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1736{
1737 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001738 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1739 boost::ignore_unused(operatorPtr);
1740
1741 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1742 CHECK_VALID_SIZE(inputs.size(), 1);
1743
1744 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1745 CHECK_VALID_SIZE(outputs.size(), 1);
1746
Finn Williamsc42c3842019-01-22 14:18:11 +00001747 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001748 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001749 activationDesc.m_Function = activationType;
1750
1751 switch (activationType)
1752 {
1753 case ActivationFunction::ReLu:
1754 {
1755 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1756 break;
1757 }
1758 case ActivationFunction::BoundedReLu:
1759 {
1760 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1761 activationDesc.m_A = 6.0f;
1762 activationDesc.m_B = 0.0f;
1763 break;
1764 }
1765 case ActivationFunction::Sigmoid:
1766 {
1767 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1768 break;
1769 }
Nina Drozd99851762019-04-09 09:37:38 +01001770 case ActivationFunction::TanH:
1771 {
1772 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1773 activationDesc.m_A = 1.0f;
1774 activationDesc.m_B = 1.0f;
1775 break;
1776 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001777 default:
1778 {
1779 throw ParseException(
1780 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1781 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1782 }
1783 }
1784
1785 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001786
1787 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1788 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1789
1790 // register the input connection slots for the layer, connections are made after all layers have been created
1791 // only the tensors for the inputs are relevant, exclude the const tensors
1792 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1793 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1794
1795 // register the output connection slots for the layer, connections are made after all layers have been created
1796 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1797 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1798}
Sadikb94967b2018-09-19 15:30:00 +01001799armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1800 const std::vector<int32_t> & targetDimsIn)
1801{
1802 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1803 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1804
1805 if (stretchDim != targetDimsIn.end())
1806 {
1807 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1808 {
1809 throw ParseException(
1810 boost::str(
1811 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1812 }
1813
1814 auto targetNumElements =
1815 boost::numeric_cast<unsigned int>(
1816 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1817
1818 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1819 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1820 }
1821
1822 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1823
1824 TensorInfo reshapeInfo = inputTensorInfo;
1825 reshapeInfo.SetShape(outputShape);
1826
1827 return reshapeInfo;
1828}
1829
1830void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1831{
1832 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1833
1834 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001835
1836 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1837 CHECK_VALID_SIZE(outputs.size(), 1);
1838
1839 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1840 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1841
1842 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001843 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1844 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001845 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1846
kevmay0171972a82018-12-17 14:28:03 +00001847 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001848 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1849 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001850 {
1851 std::stringstream ss;
1852 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001853 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001854 << " does not equal output shape "
1855 << actualOutputTensorInfo.GetShape()
1856 << ": "
1857 << CHECK_LOCATION().AsString();
1858 throw ParseException(ss.str());
1859 }
1860
Sadikb94967b2018-09-19 15:30:00 +01001861 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001862 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001863
1864 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1865 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001866 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001867
1868 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1869 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1870
1871 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1872 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1873}
1874
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001875void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1876{
1877 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1878
1879 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1880 CHECK_VALID_SIZE(inputs.size(), 2);
1881
1882 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1883 CHECK_VALID_SIZE(outputs.size(), 1);
1884
1885 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1886
1887 // Data for the parsed tensor args (size) must be stored locally.
1888 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1889
1890 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1891 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1892
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001893 ResizeDescriptor desc;
1894 desc.m_Method = armnn::ResizeMethod::Bilinear;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001895 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001896 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1897 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001898
1899 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001900 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001901
1902 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1903 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1904
1905 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1906 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1907
1908 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1909 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1910}
1911
Sadik Armagan479045b2018-10-01 11:51:37 +01001912void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1913{
1914 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1915
1916 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1917 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1918
1919 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1920
1921 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1922 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1923 CHECK_VALID_SIZE(outputs.size(), 1);
1924
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001925 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1926 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001927
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001928 const unsigned int concatDimInput = static_cast<unsigned int>(
1929 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01001930
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001931 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1932 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001933
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001934 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001935
1936 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1937 {
1938 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1939
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001940 // This set up concatDescriptor view origin
1941 armnnUtils::ProcessConcatInputTensorInfo(
1942 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001943 }
1944
1945 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01001946 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01001947
1948 BOOST_ASSERT(layer != nullptr);
1949
1950 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1951 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001952
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001953 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001954
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001955 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001956
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001957 // add fused activation layer
1958 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001959
Sadik Armagan479045b2018-10-01 11:51:37 +01001960 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1961 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1962}
1963
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001964void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1965{
1966 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1967
1968 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1969 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1970
1971 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1972
1973 FullyConnectedDescriptor desc;
1974 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001975 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001976
1977 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1978 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1979 CHECK_VALID_SIZE(outputs.size(), 1);
1980
1981 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1982
1983 // Fully Connected Layer accepts two dimensional weights input
1984 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1985 if (weightsDimension != 2)
1986 {
1987 throw ParseException(
1988 boost::str(
1989 boost::format(
1990 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1991 "Node %2%")
1992 % weightsDimension
1993 % CHECK_LOCATION().AsString()));
1994 }
1995
Matteo Martincigh747ef822018-12-18 09:26:39 +00001996 auto filterTensorAndData = CreateConstTensor(inputs[1],
1997 filterTensorInfo,
1998 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001999 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002000 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
2001
2002 if (inputs.size() == 3)
2003 {
2004 desc.m_BiasEnabled = true;
2005 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002006 auto biasTensorAndData = CreateConstTensor(inputs[2],
2007 biasTensorInfo,
2008 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002009 layer = m_Network->AddFullyConnectedLayer(desc,
2010 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002011 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002012 layerName.c_str());
2013 }
2014 else
2015 {
2016 layer = m_Network->AddFullyConnectedLayer(desc,
2017 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002018 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002019 layerName.c_str());
2020 }
2021 BOOST_ASSERT(layer != nullptr);
2022
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002023 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2024
2025 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2026
2027 if (inputTensorInfo.GetNumDimensions() > 2)
2028 {
2029 // Add reshape to flatten to 2D [batch_size, input_size],
2030 // where "input_size" corresponds to the number of inputs to the layer,
2031 // matching the second dimension of weights,
2032 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2033 std::vector<unsigned int> reshapedDimensions(2);
2034 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2035 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2036
2037 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2038 {
2039 throw ParseException(
2040 boost::str(
2041 boost::format(
2042 "Failed to deduce input tensor shape from filter size %1%")
2043 % reshapedDimensions[1]
2044 % CHECK_LOCATION().AsString()));
2045 }
2046
2047 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2048 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2049
2050 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2051 armnn::ReshapeDescriptor desc;
2052 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2053 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2054
2055 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2056 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2057
2058 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2059 }
2060 else
2061 {
2062 // register the input connection slot for the layer
2063 // only the tensors for the inputs are relevant, exclude the const tensors
2064 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2065 }
2066
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002067 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2068 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2069
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002070 // we need to add the activation layer and fortunately we don't need to care about the data layout
2071 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2072 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002073
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002074 // register the output connection slots for the layer, connections are made after all layers have been created
2075 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2076 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2077}
2078
// Parses the TfLite custom operator TFLite_Detection_PostProcess into an armnn
// DetectionPostProcess layer. Unlike builtin operators, this operator carries
// its attributes in a flexbuffers-encoded custom_options blob.
// Throws InvalidArgumentException for an out-of-range IoU threshold and
// ParseException (via the CHECK_* macros) for malformed models.
void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    // The operator produces exactly four outputs.
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two keys are optional in the flexbuffers map; when absent the
    // descriptor's default values are kept.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Input 2 holds the anchors; they are constant data baked into the model.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
                                                                       layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1 });

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    // (input 2, the anchors, is handled above as a constant).
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
2156
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002157/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2158void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2159{
2160 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2161
2162 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2163 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2164 CHECK_VALID_SIZE(outputs.size(), 1);
2165
2166 if (inputs.size() < 1)
2167 {
2168 throw ParseException("Pack must have at least one input.");
2169 }
2170
2171 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2172 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2173
2174 StackDescriptor desc;
2175 desc.m_Axis = static_cast<uint32_t>(options->axis);
2176 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2177
2178 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2179 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2180 desc.m_InputShape = inputTensorInfo.GetShape();
2181
2182 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2183 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2184
2185 BOOST_ASSERT(layer != nullptr);
2186
2187 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2188 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2189
2190 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2191 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2192
2193 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2194 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2195}
2196
Nina Drozd200e3802019-04-15 09:47:39 +01002197void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2198{
2199 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2200
2201 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2202 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2203
2204 // This unpackAxis indicates the axis to unpack
2205 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2206
2207 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2208 CHECK_VALID_SIZE(inputs.size(), 1);
2209
2210 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002211
2212 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2213 {
2214 throw ParseException(
2215 boost::str(
2216 boost::format(
2217 "The unpack axis: %1% cannot be greater than or equal to "
2218 "the number of input dimension %2% %3%")
2219 % unpackAxis
2220 % inputTensorInfo.GetNumDimensions()
2221 % CHECK_LOCATION().AsString()));
2222 }
2223
Nina Drozd200e3802019-04-15 09:47:39 +01002224 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2225 // If num is not defined, automatically infer from the length of the dimension axis.
2226 if(unpackNum == 0)
2227 {
2228 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2229 }
2230
2231 // If unpack number cannot be inferred and is still zero, throw ParseException.
2232 if(unpackNum == 0)
2233 {
2234 throw ParseException("Number to unpack must greater than zero.");
2235 }
2236
2237 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2238 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2239
2240 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2241 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2242
2243 // Add current input shape to unpackDimSizes
2244 for (unsigned int i = 0; i < inputDimSize; ++i)
2245 {
2246 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2247 }
2248
2249 if (unpackDimSizes[unpackAxis] != unpackNum)
2250 {
2251 throw ParseException("Number to unpack must be the same as length of the dimension to "
2252 "unpack along.");
2253 }
2254
2255 unpackDimSizes[unpackAxis] /= unpackNum;
2256
2257 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2258 for (unsigned int j = 0; j < unpackNum; ++j)
2259 {
2260 // Set the size of the views.
2261 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2262 {
2263 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2264 }
2265 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2266 }
2267
2268 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2269 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2270
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002271 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2272 unpackDimSizes.data());
2273
Nina Drozd200e3802019-04-15 09:47:39 +01002274 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2275 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2276
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002277 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2278 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2279 {
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002280 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002281 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2282 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002283 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002284 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2285
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002286 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2287 outputTensorInfo.GetDataType(),
2288 outputTensorInfo.GetQuantizationScale(),
2289 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002290 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2291
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002292 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002293
2294 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2295 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2296 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2297 }
Nina Drozd200e3802019-04-15 09:47:39 +01002298}
2299
// Parses the TfLite Split operator into an armnn Splitter layer.
// Input 0 is the (constant) split axis; input 1 is the tensor to split.
// Throws ParseException when the split count is zero, the axis is 0 or 2,
// the rank exceeds MaxNumOfTensorDimensions, or the dimension is not evenly
// divisible by the number of splits.
void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSplitOptions();

    const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);

    // If number of splits cannot be inferred and is zero, throw ParseException.
    if(numSplits == 0)
    {
        throw ParseException("Number to splits must greater than zero.");
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), numSplits);

    // Note the TfLite input order: inputs[0] is the axis, inputs[1] is the data.
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);

    // The axis is constant data stored in the model; copy it out of its buffer.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
    std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());

    BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
    const unsigned int splitDim = axisData[0];

    // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
    if (splitDim == 0 || splitDim == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for split is not supported by Armnn. %2%")
                % splitDim
                % CHECK_LOCATION().AsString()));
    }

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "The number of dimensions: %1% for input tensors of the "
                    "split op cannot be greater than %2% %3%")
                % inputTensorInfo.GetNumDimensions()
                % MaxNumOfTensorDimensions
                % CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    splitterDimSizes[splitDim] /= numSplits;

    // Each view keeps the reduced size along splitDim and is offset by its index.
    SplitterDescriptor splitDesc(numSplits, inputDimSize);
    for (unsigned int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

    auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());

    // Only input 1 (the data tensor) becomes a connection; the axis is constant.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
        layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2394
Sadik Armagan58f39192018-09-17 14:14:39 +01002395armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2396 unsigned int outputSlot,
2397 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002398{
2399 ActivationDescriptor activationDesc;
2400 std::string layerName = prevLayer->GetName();
2401
2402 switch(activationType)
2403 {
2404 case tflite::ActivationFunctionType_NONE:
2405 {
2406 // this is a no-op: return previous layer
2407 return prevLayer;
2408 }
2409 case tflite::ActivationFunctionType_RELU:
2410 {
2411 activationDesc.m_Function = ActivationFunction::ReLu;
2412 layerName += ":RELU";
2413 break;
2414 }
2415 case tflite::ActivationFunctionType_RELU6:
2416 {
2417 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2418 activationDesc.m_A = 6.0f;
2419 activationDesc.m_B = 0.0f;
2420 layerName += ":RELU6";
2421 break;
2422 }
2423 case tflite::ActivationFunctionType_TANH:
2424 {
2425 activationDesc.m_Function = ActivationFunction::TanH;
2426 activationDesc.m_A = 1.0f;
2427 activationDesc.m_B = 1.0f;
2428 layerName += ":TANH";
2429 break;
2430 }
2431
2432 // I only put these here as a reminder what others we could support
2433 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2434 case tflite::ActivationFunctionType_SIGN_BIT:
2435 default:
2436 {
2437 throw ParseException(
2438 boost::str(
2439 boost::format("TfLite parser doesn't suppport fused activation: "
2440 "%1%/%2% %3% ") %
2441 activationType %
2442 tflite::EnumNameActivationFunctionType(activationType) %
2443 CHECK_LOCATION().AsString()));
2444
2445 }
2446 }
2447
2448 IConnectableLayer* activationLayer =
2449 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2450
2451 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2452 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2453 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2454 return activationLayer;
2455}
2456
2457TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2458{
2459 if (fileName == nullptr)
2460 {
2461 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2462 CHECK_LOCATION().AsString()));
2463 }
2464 boost::system::error_code errorCode;
2465 boost::filesystem::path pathToFile(fileName);
2466 if (!boost::filesystem::exists(pathToFile, errorCode))
2467 {
2468 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2469 fileName %
2470 errorCode %
2471 CHECK_LOCATION().AsString()));
2472 }
2473 std::ifstream file(fileName, std::ios::binary);
2474 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2475 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2476 fileContent.size());
2477}
2478
2479TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2480{
2481 if (binaryContent == nullptr)
2482 {
2483 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2484 CHECK_LOCATION().AsString()));
2485 }
2486 flatbuffers::Verifier verifier(binaryContent, len);
2487 if (verifier.VerifyBuffer<tflite::Model>() == false)
2488 {
2489 throw ParseException(
2490 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2491 "flatbuffers format. size:%1% %2%") %
2492 len %
2493 CHECK_LOCATION().AsString()));
2494 }
2495 return tflite::UnPackModel(binaryContent);
2496}
2497
2498TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2499 size_t subgraphIndex,
2500 size_t operatorIndex)
2501{
2502 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2503
Derek Lambertiff05cc52019-04-26 13:05:17 +01002504 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2505 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002506
2507 size_t inputCount = operatorPtr->inputs.size();
2508 TensorRawPtrVector result(inputCount);
2509 for (size_t i=0; i<inputCount; ++i)
2510 {
2511 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002512 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002513 }
2514 return result;
2515}
2516
2517TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2518 size_t subgraphIndex,
2519 size_t operatorIndex)
2520{
2521 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2522
Derek Lambertiff05cc52019-04-26 13:05:17 +01002523 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2524 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002525
2526 size_t outputCount = operatorPtr->outputs.size();
2527 TensorRawPtrVector result(outputCount);
2528 for (size_t i=0; i<outputCount; ++i)
2529 {
2530 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2531 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002532 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002533 }
2534 return result;
2535}
2536
2537TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2538 size_t subgraphIndex)
2539{
2540 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002541 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002542
Derek Lambertiff05cc52019-04-26 13:05:17 +01002543 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002544 TensorIdRawPtrVector result(inputCount);
2545 for (size_t i=0; i<inputCount; ++i)
2546 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002547 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002548 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002549 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002550 }
2551 return result;
2552}
2553
2554TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2555 size_t subgraphIndex)
2556{
2557 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002558 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002559
Derek Lambertiff05cc52019-04-26 13:05:17 +01002560 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002561 TensorIdRawPtrVector result(outputCount);
2562 for (size_t i=0; i<outputCount; ++i)
2563 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002564 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2565 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002566 }
2567 return result;
2568}
2569
// Returns a reference to the operator's input tensor indices exactly as stored
// in the flatbuffer (no sign/bounds checking is performed here).
std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
                                                      size_t subgraphIndex,
                                                      size_t operatorIndex)
{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->inputs;
}
2579
// Returns a reference to the operator's output tensor indices exactly as stored
// in the flatbuffer (no sign/bounds checking is performed here).
std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
                                                       size_t subgraphIndex,
                                                       size_t operatorIndex)
{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->outputs;
}
2589
2590void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2591 size_t operatorIndex,
2592 IConnectableLayer* layer,
2593 const std::vector<unsigned int>& tensorIndexes)
2594{
2595 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2596 BOOST_ASSERT(layer != nullptr);
2597 if (tensorIndexes.size() != layer->GetNumInputSlots())
2598 {
2599 throw ParseException(
2600 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2601 " for subgraph:%3% operator index:%4% %5%") %
2602 tensorIndexes.size() %
2603 layer->GetNumInputSlots() %
2604 subgraphIndex %
2605 operatorIndex %
2606 CHECK_LOCATION().AsString()));
2607 }
2608
2609 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2610 {
2611 unsigned int tensorIndex = tensorIndexes[slotIndex];
2612 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2613 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2614 }
2615}
2616
2617void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2618 size_t operatorIndex,
2619 IConnectableLayer* layer,
2620 const std::vector<unsigned int>& tensorIndexes)
2621{
2622 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2623 BOOST_ASSERT(layer != nullptr);
2624 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2625 {
2626 throw ParseException(
2627 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2628 " for subgraph:%3% operator index:%4% %5%") %
2629 tensorIndexes.size() %
2630 layer->GetNumOutputSlots() %
2631 subgraphIndex %
2632 operatorIndex %
2633 CHECK_LOCATION().AsString()));
2634 }
2635
2636 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2637 {
2638 unsigned int tensorIndex = tensorIndexes[slotIndex];
2639 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2640 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2641 }
2642}
2643
2644void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2645{
2646 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2647
2648 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2649 for (auto const & tensorIdAndPtr : inputs)
2650 {
2651 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2652 IConnectableLayer* layer =
2653 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2654
2655 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2656 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2657
2658 RegisterOutputSlots(subgraphIndex,
2659 VIRTUAL_OPERATOR_ID,
2660 layer,
2661 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2662 }
2663}
2664
2665void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2666{
2667 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2668
2669 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2670 for (auto const & tensorIdAndPtr : outputs)
2671 {
2672 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2673 IConnectableLayer* layer =
2674 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2675
2676 RegisterInputSlots(subgraphIndex,
2677 VIRTUAL_OPERATOR_ID,
2678 layer,
2679 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2680 }
2681}
2682
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002683void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2684{
2685 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2686
Derek Lambertiff05cc52019-04-26 13:05:17 +01002687 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002688 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2689 {
2690 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2691 {
2692 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2693 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2694 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002695 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002696 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2697 auto tensorAndData = CreateConstTensor(tensorPtr,
2698 tensorInfo,
2699 armnn::Optional<armnn::PermutationVector&>());
2700
2701 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2702 IConnectableLayer *layer =
2703 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2704
2705 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2706 RegisterOutputSlots(subgraphIndex,
2707 VIRTUAL_OPERATOR_ID,
2708 layer,
2709 { tensorIndex });
2710
2711 }
2712 }
2713 }
2714}
2715
// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
// Returns a non-owning pointer to the model buffer at bufferIndex.
// CHECK_BUFFER throws if the index is invalid for this model.
TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
{
    CHECK_BUFFER(model, bufferIndex);
    return model->buffers[bufferIndex].get();
}
2722
// Builds a ConstTensor of element type T from a model buffer and returns it
// together with the SupportedDataStorage that owns the tensor's data, so the
// data stays alive as long as the returned pair does.
// NOTE(review): CreateConstTensorImpl<T> (defined elsewhere) is expected to
// copy — and, given a permutation vector, permute — the buffer contents;
// confirm against its definition.
template<typename T>
std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
                                            TfLiteParser::TensorRawPtr tensorPtr,
                                            armnn::TensorInfo& tensorInfo,
                                            armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    auto constData = CreateConstTensorImpl<T>(bufferPtr,
                                              tensorPtr,
                                              tensorInfo,
                                              permutationVector);
    // Transfer ownership of the backing data into the storage wrapper.
    TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
    return std::make_pair(constData.first, std::move(storage));
}
2737
// Creates an armnn ConstTensor (plus the storage keeping its data alive) from
// a model tensor, dispatching on the tensor's armnn data type.
// Throws ParseException for data types the parser does not handle.
std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
                                armnn::TensorInfo& tensorInfo,
                                armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    CHECK_TENSOR_PTR(tensorPtr);
    auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
    // Ensure the buffer actually holds enough bytes for the declared shape/type.
    CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);

    switch (tensorInfo.GetDataType())
    {
        case armnn::DataType::Float32:
            return CreateConstTensorAndStoreData<float>(bufferPtr,
                                                        tensorPtr,
                                                        tensorInfo,
                                                        permutationVector);
        case armnn::DataType::QuantisedAsymm8:
            return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        case armnn::DataType::Signed32:
            return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        default:
        {
            std::stringstream errString;
            errString << "Unexpected datatype when creating const tensor: "
                        << armnn::GetDataTypeName(tensorInfo.GetDataType())
                        << " shape:" << tensorInfo.GetShape()
                        << CHECK_LOCATION().AsString();
            throw ParseException(errString.str());
        }
    }
}
2775
2776BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2777 const std::string& name) const
2778{
2779 CHECK_SUBGRAPH(m_Model, subgraphId);
2780 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2781 for (auto const & input : inputs)
2782 {
2783 if (input.second->name == name)
2784 {
2785 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2786 return std::make_pair(bindingId, ToTensorInfo(input.second));
2787 }
2788 }
2789
2790 std::stringstream bindings;
2791 for (auto const & input : inputs)
2792 {
2793 bindings << "'" << input.second->name << "' ";
2794 }
2795
2796 throw ParseException(
2797 boost::str(
2798 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2799 "Possible inputs are: [%3%] %4%") %
2800 subgraphId %
2801 name %
2802 bindings.str() %
2803 CHECK_LOCATION().AsString()));
2804}
2805
// Looks up the binding info (binding id + tensor info) for the named graph
// output of the given subgraph.
// Throws ParseException listing all available outputs when the name is unknown.
BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        auto const output = outputs[i];
        if (output.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
            // If the parser overrode the output shapes (see
            // ParseDetectionPostProcess pushing into m_OverridenOutputShapes),
            // report the overridden shape instead of the one stored in the model.
            // NOTE(review): this indexes m_OverridenOutputShapes by the
            // subgraph-output position i — assumes the overrides line up with
            // this subgraph's outputs; confirm for multi-subgraph models.
            std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
                                              m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
            return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
        }
    }

    std::stringstream bindings;
    for (auto const & output : outputs)
    {
        bindings << "'" << output.second->name << "' ";
    }

    throw ParseException(
        boost::str(
            boost::format("No output binding found for subgraph:%1% and name:%2%. "
                          "Possible outputs are: [%3%] %4%") %
            subgraphId %
            name %
            bindings.str() %
            CHECK_LOCATION().AsString()));
}
2838
2839size_t TfLiteParser::GetSubgraphCount() const
2840{
2841 return m_Model->subgraphs.size();
2842}
2843
2844std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2845{
2846 CHECK_SUBGRAPH(m_Model, subgraphId);
2847 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2848 std::vector<std::string> result;
2849 result.reserve(inputs.size());
2850 for (auto const & input : inputs)
2851 {
2852 result.push_back(input.second->name);
2853 }
2854 return result;
2855}
2856
2857std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2858{
2859 CHECK_SUBGRAPH(m_Model, subgraphId);
2860 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2861 std::vector<std::string> result;
2862 result.reserve(outputs.size());
2863 for (auto const & output : outputs)
2864 {
2865 result.push_back(output.second->name);
2866 }
2867 return result;
2868}
2869
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01002870ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01002871{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01002872 return new TfLiteParser(options);
telsoa01c577f2c2018-08-31 09:22:23 +01002873}
2874
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01002875ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01002876{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01002877 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
telsoa01c577f2c2018-08-31 09:22:23 +01002878}
2879
/// Releases a parser previously obtained from CreateRaw()/Create().
/// Safe to call with nullptr (delete on null is a no-op).
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
2884
/// Takes ownership of a float buffer; the uint8 and int32 slots stay null
/// so the storage holds exactly one active data type.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int32Data(nullptr)
{
}
2891
/// Takes ownership of a uint8 buffer; the float and int32 slots stay null
/// so the storage holds exactly one active data type.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int32Data(nullptr)
{
}
2898
/// Takes ownership of an int32 buffer; the float and uint8 slots stay null
/// so the storage holds exactly one active data type.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
2905
2906} // armnnTfLiteParser