blob: e19edc382150db400464f33e7863ac2cbfb4ac96 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5#include "TfLiteParser.hpp"
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Exceptions.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <boost/filesystem.hpp>
11
12// armnnUtils:
Sadik Armagan479045b2018-10-01 11:51:37 +010013#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010014#include <Permute.hpp>
15#include <VerificationHelpers.hpp>
16
17// The generated code based on the Tf Lite schema:
18#include <schema_generated.h>
19
20#include <boost/core/ignore_unused.hpp>
21#include <boost/assert.hpp>
22#include <boost/format.hpp>
23#include <boost/log/trivial.hpp>
24
25#include <fstream>
26#include <algorithm>
27#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010028#include <numeric>
keidav011b3e2ea2019-02-21 10:07:37 +000029#include <flatbuffers/flexbuffers.h>
telsoa01c577f2c2018-08-31 09:22:23 +010030
31using namespace armnn;
32using armnn::CheckLocation;
33namespace armnnTfLiteParser
34{
35namespace
36{
jimfly01c25411c2018-11-14 17:47:22 +000037
telsoa01c577f2c2018-08-31 09:22:23 +010038const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
39
40void CheckSubgraph(const TfLiteParser::ModelPtr & model,
41 size_t subgraphIndex,
42 const CheckLocation & location)
43{
44 if (model.get() == nullptr)
45 {
46 throw ParseException(
47 boost::str(
48 boost::format("%1% was called with invalid (null) model. "
49 "Possible reason is that the model is not yet loaded and Unpack(ed). "
50 "subgraph:%2% at %3%") %
51 location.m_Function %
52 subgraphIndex %
53 location.FileLine()));
54 }
55 else if (subgraphIndex >= model->subgraphs.size())
56 {
57 throw ParseException(
58 boost::str(
59 boost::format("%1% was called with an invalid subgraph index. "
60 "subgraph:%2% at %3%") %
61 location.m_Function %
62 subgraphIndex %
63 location.FileLine()));
64 }
65}
66
67#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
68 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
69
70void CheckModel(const TfLiteParser::ModelPtr & model,
71 size_t subgraphIndex,
72 size_t operatorIndex,
73 const CheckLocation & location)
74{
75 if (model.get() == nullptr)
76 {
77 throw ParseException(
78 boost::str(
79 boost::format("%1% was called with invalid (null) model. "
80 "Possible reason is that the model is not yet loaded and Unpack(ed). "
81 "subgraph:%2% operator:%3% at %4%") %
82 location.m_Function %
83 subgraphIndex %
84 operatorIndex %
85 location.FileLine()));
86 }
87 else if (subgraphIndex >= model->subgraphs.size())
88 {
89 throw ParseException(
90 boost::str(
91 boost::format("%1% was called with an invalid subgraph index. "
92 "subgraph:%2% operator:%3% at %4%") %
93 location.m_Function %
94 subgraphIndex %
95 operatorIndex %
96 location.FileLine()));
97 }
98 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
99 operatorIndex != VIRTUAL_OPERATOR_ID)
100 {
101 throw ParseException(
102 boost::str(
103 boost::format("%1% was called with an invalid operator index. "
104 "subgraph:%2% operator:%3% at %4%") %
105 location.m_Function %
106 subgraphIndex %
107 operatorIndex %
108 location.FileLine()));
109 }
110}
111
112#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
113 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
114
115void CheckTensor(const TfLiteParser::ModelPtr & model,
116 size_t subgraphIndex,
117 size_t tensorIndex,
118 const CheckLocation & location)
119{
120 // not checking model, because I assume CHECK_MODEL already run
121 // and checked that. An assert would do.
122 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
123
124 // also subgraph index should be checked by CHECK_MODEL so
125 // I only add an assert here
126 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
127
128 // the tensor index is the only one to check here
129 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
130 {
131 throw ParseException(
132 boost::str(
133 boost::format("%1% was called with an invalid tensor index. "
134 "subgraph:%2% tensor:%3% at %4%") %
135 location.m_Function %
136 subgraphIndex %
137 tensorIndex %
138 location.FileLine()));
139 }
140}
141
142#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
143 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
144
145void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
146 const CheckLocation & location)
147{
148 if (rawPtr == nullptr)
149 {
150 throw ParseException(
151 boost::str(
152 boost::format("%1% was called with a null tensor pointer. "
153 "at %2%") %
154 location.m_Function %
155 location.FileLine()));
156
157 }
158}
159
160#define CHECK_TENSOR_PTR(TENSOR_PTR) \
161 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
162
163void CheckBuffer(const TfLiteParser::ModelPtr & model,
164 size_t bufferIndex,
165 const CheckLocation & location)
166{
167 if (model.get() == nullptr)
168 {
169 throw ParseException(
170 boost::str(
171 boost::format("%1% was called with invalid (null) model. "
172 "Possible reason is that the model is not yet loaded and Unpack(ed). "
173 "buffer:%2% at %3%") %
174 location.m_Function %
175 bufferIndex %
176 location.FileLine()));
177 }
178 else if (bufferIndex >= model->buffers.size())
179 {
180 throw ParseException(
181 boost::str(
182 boost::format("%1% was called with an invalid buffer index. "
183 "buffer index:%2% at %3%") %
184 location.m_Function %
185 bufferIndex %
186 location.FileLine()));
187 }
188 else if (model->buffers[bufferIndex].get() == nullptr)
189 {
190 throw ParseException(
191 boost::str(
192 boost::format("The buffer #%1% is null. %3%") %
193 bufferIndex %
194 location.AsString()));
195 }
196}
197
198#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
199 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
200
201void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
202 const armnn::TensorInfo & tensorInfo,
203 uint32_t bufferId,
204 const CheckLocation & location)
205{
206 if (bufferPtr == nullptr)
207 {
208 throw ParseException(
209 boost::str(
210 boost::format("BufferPtr is null for buffer:%1%. %2%") %
211 bufferId %
212 location.AsString()));
213 }
214 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
215 tensorInfo.GetNumBytes() > bufferPtr->data.size())
216 {
217 std::stringstream ss;
218 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
219 << "For tensor: " << tensorInfo.GetShape()
220 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
221 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
222 throw ParseException(ss.str());
223 }
224}
225
226#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
227 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
228
229bool IsActivationSupported(tflite::ActivationFunctionType activationType)
230{
231 switch(activationType)
232 {
233 case tflite::ActivationFunctionType_NONE:
234 case tflite::ActivationFunctionType_RELU:
235 case tflite::ActivationFunctionType_RELU6:
236 case tflite::ActivationFunctionType_TANH:
237 {
238 return true;
239 }
240 default:
241 {
242 return false;
243 }
244 }
245}
246
247#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
248 do { \
249 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
250 { \
251 throw ParseException( \
252 boost::str( \
253 boost::format("TfLite parser doesn't suppport fused activation: " \
254 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
255 OPTION->fused_activation_function % \
256 tflite::EnumNameActivationFunctionType(\
257 OPTION->fused_activation_function) % \
258 __func__ % \
259 SUBGRAPH_INDEX % \
260 OPERATOR_INDEX % \
261 CHECK_LOCATION().FileLine())); \
262 } \
263 } while(false)
264
265
266std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
267{
268 std::vector<unsigned int> result;
269 result.reserve(in.size());
270 for (auto & i : in)
271 {
272 result.push_back(CHECKED_NON_NEGATIVE(i));
273 }
274 return result;
275}
276
277void CalcPadding(uint32_t inputSize,
278 uint32_t filterSize,
279 uint32_t stride,
280 uint32_t& paddingFront,
281 uint32_t& paddingBack,
282 tflite::Padding padding)
283{
284 paddingFront = 0;
285 paddingBack = 0;
286 if (padding == tflite::Padding_SAME)
287 {
288 uint32_t outputSize = (inputSize + stride - 1) / stride;
289 uint32_t temp = (outputSize - 1) * stride + filterSize;
290 if (temp > inputSize)
291 {
292 paddingFront = (temp - inputSize) / 2;
293 paddingBack = (temp - inputSize) - paddingFront;
294 }
295 }
296}
297
298armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
299{
300 armnn::DataType type;
301 CHECK_TENSOR_PTR(tensorPtr);
302
303 switch (tensorPtr->type)
304 {
305 case tflite::TensorType_UINT8:
306 type = armnn::DataType::QuantisedAsymm8;
307 break;
308 case tflite::TensorType_FLOAT32:
309 type = armnn::DataType::Float32;
310 break;
311 case tflite::TensorType_INT32:
312 type = armnn::DataType::Signed32;
313 break;
314
315 default:
316 {
317 CheckLocation location = CHECK_LOCATION();
318 throw ParseException(
319 boost::str(
320 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
321 tensorPtr->type %
322 tflite::EnumNameTensorType(tensorPtr->type) %
323 tensorPtr->name %
324 location.AsString()));
325 }
326 }
327
328 float quantizationScale = 0.0f;
329 int32_t quantizationOffset = 0;
330
331 if (tensorPtr->quantization.get())
332 {
333 CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
334 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
335
336 if (tensorPtr->quantization->scale.size() == 1)
337 {
338 quantizationScale = tensorPtr->quantization->scale[0];
339 }
340 if (tensorPtr->quantization->zero_point.size() == 1)
341 {
342 // NOTE: we lose precision here when converting from 64 bit to 32
343 // but this is what we support at the monent in ArmNN
344 quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
345 }
346 }
347
348 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
349
350 // two statements (on purpose) for easier debugging:
351 armnn::TensorInfo result(static_cast<unsigned int>(tensorPtr->shape.size()),
352 dimensions.data(),
353 type,
354 quantizationScale,
355 quantizationOffset);
356 return result;
357}
358
359template<typename T>
360std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
361CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
362 TfLiteParser::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000363 armnn::TensorInfo& tensorInfo,
364 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100365{
366 BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
367 BOOST_ASSERT_MSG(bufferPtr != nullptr,
368 boost::str(
369 boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
370
371 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000372
373 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
374 {
375 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000376 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
377 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000378 }
379 else
380 {
381 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
382 }
383
telsoa01c577f2c2018-08-31 09:22:23 +0100384 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
385}
386
telsoa01c577f2c2018-08-31 09:22:23 +0100387armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
388{
389 // generate the binding id by shifting the tensor id by 8 bit
390 // and add the subgraph id, which allows 256 subgraphs
391 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
392}
393
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000394bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
395{
396 const unsigned int actualSize = actual.GetNumDimensions();
397 if (actualSize != expected.size())
398 {
399 return false;
400 }
401
402 for (unsigned int i = 0u; i < actualSize; i++)
403 {
404 if (expected[i] < 0 ||
405 actual[i] != static_cast<unsigned int>(expected[i]))
406 {
407 return false;
408 }
409 }
410
411 return true;
412}
413
telsoa01c577f2c2018-08-31 09:22:23 +0100414} // <anonymous>
415
416TfLiteParser::TfLiteParser()
417: m_Network(nullptr, nullptr)
418, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
419{
420 // register supported operators
421 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
Bruno Goncalvesdb947e22019-02-08 18:52:21 -0200422 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
Sadik Armagan479045b2018-10-01 11:51:37 +0100423 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
telsoa01c577f2c2018-08-31 09:22:23 +0100424 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
425 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
keidav011b3e2ea2019-02-21 10:07:37 +0000426 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
Sadik Armagan8853c1f2018-10-22 09:04:18 +0100427 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
Finn Williamsc42c3842019-01-22 14:18:11 +0000428 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100429 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
Sadik Armagan58f39192018-09-17 14:14:39 +0100430 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
431 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
Sadikb94967b2018-09-19 15:30:00 +0100432 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -0200433 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
Sadik Armagan479045b2018-10-01 11:51:37 +0100434 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
Bruno Goncalvesbaded142019-02-08 19:02:48 -0200435 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
Sadik Armagan479045b2018-10-01 11:51:37 +0100436 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
Bruno Goncalvesbbeae262019-02-07 18:37:39 -0200437 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -0200438 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
Bruno Goncalvesf803f782018-12-18 13:40:30 -0200439 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
Bruno Goncalves2235cee2018-12-19 12:51:45 -0200440 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
Bruno Goncalves6c2355b2018-12-19 12:52:01 -0200441 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
telsoa01c577f2c2018-08-31 09:22:23 +0100442}
443
444void TfLiteParser::ResetParser()
445{
446 m_Network = armnn::INetworkPtr(nullptr, nullptr);
447 m_Model = nullptr;
448 m_SubgraphConnections.clear();
449}
450
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200451void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
452 size_t operatorIndex,
453 IConnectableLayer *layer)
454{
455 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
456 BOOST_ASSERT(layer != nullptr);
457
458 const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
459 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
460
461 BOOST_ASSERT(operatorPtr->inputs.size() > 1);
462
463 uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
464 TensorRawPtr tensorPtr = subGraphPtr->tensors[reshapedInputId].get();
465 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
466 TensorRawPtr tensorPtr1 = subGraphPtr->tensors[inputId].get();
467
468 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
469 armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);
470
471 if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
472 {
473 uint32_t id = reshapedInputId;
474 reshapedInputId = inputId;
475 inputId = id;
476
477 reshapedTensorInfo = ToTensorInfo(tensorPtr1);
478 inputTensorInfo = ToTensorInfo(tensorPtr);
479 }
480
481 uint32_t numDimensions = inputTensorInfo.GetNumDimensions();
482
483 std::vector<unsigned> reshapedDim;
484 for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
485 {
486 reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
487 }
488
489 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
490 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
491
492 reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
493
494 std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
495 armnn::ReshapeDescriptor desc;
496 desc.m_TargetShape = reshapedTensorInfo.GetShape();
497 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
498
499 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
500 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
501
502 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
503
504 armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
505 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
506}
507
telsoa01c577f2c2018-08-31 09:22:23 +0100508INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
509{
510 ResetParser();
511 m_Model = LoadModelFromFile(graphFile);
512 return CreateNetworkFromModel();
513}
514
515INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
516{
517 ResetParser();
518 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
519 return CreateNetworkFromModel();
520}
521
522INetworkPtr TfLiteParser::CreateNetworkFromModel()
523{
524 m_Network = INetwork::Create();
525 BOOST_ASSERT(m_Model.get() != nullptr);
526
527 bool failedToCreate = false;
528 std::stringstream errors;
529
530 if (m_Model->subgraphs.size() != 1)
531 {
532 throw ParseException(
533 boost::str(
534 boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
535 m_Model->subgraphs.size() %
536 CHECK_LOCATION().AsString()));
537 }
538
539 size_t subgraphIndex = 0;
540 for (SubGraphPtr const & subgraph : m_Model->subgraphs)
541 {
542 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
543
544 size_t operatorIndex = 0;
545 for (OperatorPtr const & op : subgraph->operators)
546 {
547 try
548 {
telsoa01c577f2c2018-08-31 09:22:23 +0100549 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
550 auto builtinCode = opCodePtr->builtin_code;
551
552 if (builtinCode > tflite::BuiltinOperator_MAX)
553 {
554 throw ParseException(
555 boost::str(
556 boost::format("Operator code %1% is out of range 0-%2%. "
557 "subgraph:%3% operator idx:%4%. %5%") %
558 builtinCode %
559 tflite::BuiltinOperator_MAX %
560 subgraphIndex %
561 operatorIndex %
562 CHECK_LOCATION().AsString()));
563 }
564
565 // lookup and call the parser function
566 auto & parserFunction = m_ParserFunctions[builtinCode];
567 (this->*parserFunction)(subgraphIndex, operatorIndex);
568 }
569 catch (const ParseException& e)
570 {
571 failedToCreate = true;
572 std::stringstream errorString;
573
574 errorString << "Failed to parse operator #" << operatorIndex
575 << " within subgraph #" << subgraphIndex
576 << " error: " << e.what();
577 BOOST_LOG_TRIVIAL(error) << errorString.str();
578
579 errors << errorString.str() << "\n";
580 }
581 ++operatorIndex;
582 }
583
584 SetupInputLayers(subgraphIndex);
585 SetupOutputLayers(subgraphIndex);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -0200586 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100587
588 ++subgraphIndex;
589 }
590
591 if (failedToCreate)
592 {
593 // we can skip everything and let the outer exception handler deal with the error
594 throw ParseException(errors.str());
595 }
596
597 // establish the connections from the layer outputs to the inputs of the subsequent layers
598 for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
599 {
600 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
601 {
602 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
603 {
604 for (size_t inputSlotIdx = 0;
605 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
606 ++inputSlotIdx)
607 {
608 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
609 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
610 }
611 }
612 }
613 }
614
615 return std::move(m_Network);
616}
617
618void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
619 size_t tensorIndex,
620 armnn::IOutputSlot* slot)
621{
622 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
623 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
624 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
625
626 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
627
628 // assuming there is only one producer for that tensor
629 if (tensorSlots.outputSlot != nullptr)
630 {
631 throw ParseException(boost::str(
632 boost::format("Another layer has already registered itself as the producer of "
633 "subgraph:%1% tensor:%2% %3%") %
634 subgraphIndex %
635 tensorIndex %
636 CHECK_LOCATION().AsString()));
637 }
638
639 tensorSlots.outputSlot = slot;
640}
641
642void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
643 size_t tensorIndex,
644 armnn::IInputSlot* slot)
645{
646 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
647 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
648 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
649
650 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
651 tensorSlots.inputSlots.push_back(slot);
652}
653
654void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
655{
656 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
657 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
658 //
659 auto opcodeIndex = operatorPtr->opcode_index;
660 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
661
662 throw ParseException(
663 boost::str(
664 boost::format("Operator not supported. "
665 "subgraph:%1% operator:%2% "
666 "opcode_index:%3% opcode:%4% / %5% %6%") %
667 subgraphIndex %
668 operatorIndex %
669 opcodeIndex %
670 opcode %
671 tflite::EnumNameBuiltinOperator(opcode) %
672 CHECK_LOCATION().AsString()));
673}
674
telsoa01c577f2c2018-08-31 09:22:23 +0100675void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
676{
677 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
678
679 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
680 const auto * options = operatorPtr->builtin_options.AsConv2DOptions();
681
682 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
683
684 Convolution2dDescriptor desc;
685 desc.m_BiasEnabled = false;
686 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
687 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000688 desc.m_DataLayout = armnn::DataLayout::NHWC;
telsoa01c577f2c2018-08-31 09:22:23 +0100689
690 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
691 CHECK_VALID_SIZE(inputs.size(), 2, 3);
692
693 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
694 CHECK_VALID_SIZE(outputs.size(), 1);
695
696 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
697 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
698
699 // assuming input is NHWC
700 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
701 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
702
703 // assuming the filter is OHWI : Output, H, W, Input
704 // which is essentially the same as NHWC
705 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
706 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
707
708 CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
709 CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
710
Matteo Martincigh747ef822018-12-18 09:26:39 +0000711 auto filterTensorAndData = CreateConstTensor(inputs[1],
712 filterTensorInfo,
713 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100714 armnn::IConnectableLayer* layer;
715
716 auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
717
718 if (inputs.size() == 3)
719 {
720 desc.m_BiasEnabled = true;
721 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000722 auto biasTensorAndData = CreateConstTensor(inputs[2],
723 biasTensorInfo,
724 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100725 layer = m_Network->AddConvolution2dLayer(desc,
726 filterTensorAndData.first,
727 biasTensorAndData.first,
728 layerName.c_str());
729 }
730 else
731 {
732 layer = m_Network->AddConvolution2dLayer(desc,
733 filterTensorAndData.first,
734 layerName.c_str());
735 }
736
737 BOOST_ASSERT(layer != nullptr);
738
telsoa01c577f2c2018-08-31 09:22:23 +0100739 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000740 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100741
742 // register the input connection slots for the layer, connections are made after all layers have been created
743 // only the tensors for the inputs are relevant, exclude the const tensors
744 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000745 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100746
jimfly01c25411c2018-11-14 17:47:22 +0000747 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100748 // register the output connection slots for the layer, connections are made after all layers have been created
749 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
750 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
751}
752
753void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
754{
755 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
756
757 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
758 const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
759
760 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
761
762 DepthwiseConvolution2dDescriptor desc;
763 desc.m_BiasEnabled = false;
764 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
765 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000766 desc.m_DataLayout = armnn::DataLayout::NHWC;
telsoa01c577f2c2018-08-31 09:22:23 +0100767 // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
768 CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);
769
770 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
771 CHECK_VALID_SIZE(inputs.size(), 2, 3);
772 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
773 CHECK_VALID_SIZE(outputs.size(), 1);
774
775 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
776 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
777
Matteo Martincigh747ef822018-12-18 09:26:39 +0000778 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +0100779 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
780 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000781
782 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +0100783 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
784 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
785
Matteo Martincigh747ef822018-12-18 09:26:39 +0000786 // Reshape weights as [ H, W, I, M ]
787 filterTensorInfo.SetShape({ filterHeight,
788 filterWidth,
789 inputTensorInfo.GetShape()[3],
790 filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });
791
792 // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
793 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
794
telsoa01c577f2c2018-08-31 09:22:23 +0100795 CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
796 CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
797
Matteo Martincigh747ef822018-12-18 09:26:39 +0000798 auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100799 armnn::IConnectableLayer* layer;
800 auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
801
802 if (inputs.size() == 3)
803 {
804 desc.m_BiasEnabled = true;
805 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000806 auto biasTensorAndData = CreateConstTensor(inputs[2],
807 biasTensorInfo,
808 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100809 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
810 filterTensorAndData.first,
811 biasTensorAndData.first,
812 layerName.c_str());
813 }
814 else
815 {
816 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
817 filterTensorAndData.first,
818 layerName.c_str());
819 }
820 BOOST_ASSERT(layer != nullptr);
821
telsoa01c577f2c2018-08-31 09:22:23 +0100822 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000823 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100824
825 // register the input connection slots for the layer, connections are made after all layers have been created
826 // only the tensors for the inputs are relevant, exclude the const tensors
827 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000828 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100829
jimfly01c25411c2018-11-14 17:47:22 +0000830 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100831 // register the output connection slots for the layer, connections are made after all layers have been created
832 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
833 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
834}
835
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100836void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
837{
838 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
839}
840
Bruno Goncalvesdb947e22019-02-08 18:52:21 -0200841void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
842{
843 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
844
845 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
846 CHECK_VALID_SIZE(inputs.size(), 3);
847
848 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
849 CHECK_VALID_SIZE(outputs.size(), 1);
850
851 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
852 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
853
854 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
855 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
856
857 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
858 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
859
860 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
861 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
862
863 size_t step = 2;
864 std::vector<std::pair<unsigned int, unsigned int>> crops;
865 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
866 {
867 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
868 }
869
870 armnn::BatchToSpaceNdDescriptor desc;
871 desc.m_BlockShape = blockShape;
872 desc.m_Crops = crops;
873 desc.m_DataLayout = armnn::DataLayout::NHWC;
874
875 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
876
877 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
878 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
879
880 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
881
882 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
883 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
884
885 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
886 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
887}
888
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100889void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
890{
891 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
892}
893
894void TfLiteParser::ParsePool(size_t subgraphIndex,
895 size_t operatorIndex,
896 PoolingAlgorithm algorithm)
897{
898 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
899
900 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
901 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
902
903 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
904
905 std::string layerName;
906
907 switch (algorithm)
908 {
909 case PoolingAlgorithm::Average:
910 layerName =
911 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
912 break;
913 case PoolingAlgorithm::Max:
914 layerName =
915 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
916 break;
917 default:
918 BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
919 }
920
921 Pooling2dDescriptor desc;
922
923 desc.m_PoolType = algorithm;
924 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
925 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
926 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
927 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
928 desc.m_PaddingMethod = PaddingMethod::Exclude;
929 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +0000930 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100931
932 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
933 CHECK_VALID_SIZE(inputs.size(), 1);
934 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
935
936 // assuming input is NHWC
937 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
938 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
939
940 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
941 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
942
943 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
944 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100945
946 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
947
948 BOOST_ASSERT(layer != nullptr);
949
jimfly01c25411c2018-11-14 17:47:22 +0000950 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
951 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100952
953 // register the input connection slots for the layer, connections are made after all layers have been created
954 // only the tensors for the inputs are relevant, exclude the const tensors
955 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000956 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100957
jimfly01c25411c2018-11-14 17:47:22 +0000958 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100959 // register the output connection slots for the layer, connections are made after all layers have been created
960 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
961 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
962}
963
telsoa01c577f2c2018-08-31 09:22:23 +0100964void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
965{
966 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
967 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
968 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
969
970 SoftmaxDescriptor desc;
971 desc.m_Beta = options->beta;
972
973 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
974 CHECK_VALID_SIZE(inputs.size(), 1);
975 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
976 CHECK_VALID_SIZE(outputs.size(), 1);
977
978 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
979 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
980
981 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
982 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
983
984 // register the input connection slots for the layer, connections are made after all layers have been created
985 // only the tensors for the inputs are relevant, exclude the const tensors
986 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
987 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
988
989 // register the output connection slots for the layer, connections are made after all layers have been created
990 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
991 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
992}
993
Bruno Goncalvesbaded142019-02-08 19:02:48 -0200994void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
995{
996 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
997
998 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
999 CHECK_VALID_SIZE(inputs.size(), 3);
1000
1001 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1002 CHECK_VALID_SIZE(outputs.size(), 1);
1003
1004 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1005 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1006
1007 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1008 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1009
1010 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1011 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1012
1013 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1014 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1015
1016 size_t step = 2;
1017 std::vector<std::pair<unsigned int, unsigned int>> padList;
1018 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1019 {
1020 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1021 }
1022
1023 armnn::SpaceToBatchNdDescriptor desc;
1024 desc.m_BlockShape = blockShape;
1025 desc.m_PadList = padList;
1026 desc.m_DataLayout = armnn::DataLayout::NHWC;
1027
1028 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1029
1030 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1031 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1032
1033 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1034
1035 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1036 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1037
1038 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1039 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1040}
1041
telsoa01c577f2c2018-08-31 09:22:23 +01001042armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1043 const armnn::TensorInfo & inputTensorInfo)
1044{
1045 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1046 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1047 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1048
1049 if (inputTensorInfo.GetNumDimensions() > 4)
1050 {
1051 std::stringstream ss;
1052 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1053 << " shape:" << inputTensorInfo.GetShape() << " "
1054 << CHECK_LOCATION().AsString();
1055 throw ParseException(ss.str());
1056 }
1057
1058 if (squeezeDims.empty())
1059 {
1060 squeezeDims.assign(dimensionSequence,
1061 dimensionSequence+inputTensorInfo.GetNumDimensions());
1062 }
1063
1064 std::vector<uint32_t> outputDims;
1065 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1066 {
1067 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1068 auto currentDimension = inputTensorInfo.GetShape()[i];
1069 if (skipSqueeze || currentDimension != 1)
1070 {
1071 outputDims.push_back(currentDimension);
1072 }
1073 }
1074
1075 if (outputDims.size() > 4)
1076 {
1077 std::stringstream ss;
1078 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1079 << " shape:" << inputTensorInfo.GetShape() << " "
1080 << CHECK_LOCATION().AsString();
1081 throw ParseException(ss.str());
1082 }
1083
1084 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1085 outputDims.data());
1086
1087 // we need to preserve the tensor type and the quantization data as well
1088 TensorInfo outTensorInfo = inputTensorInfo;
1089 outTensorInfo.SetShape(outShape);
1090
1091 return outTensorInfo;
1092}
1093
1094void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1095{
1096 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1097
1098 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1099 CHECK_VALID_SIZE(inputs.size(), 1);
1100
1101 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1102 CHECK_VALID_SIZE(outputs.size(), 1);
1103
1104 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1105 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1106
1107 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1108 armnn::TensorInfo outputTensorInfo =
1109 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1110 inputTensorInfo);
1111
1112 ReshapeDescriptor reshapeDesc;
1113 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1114
1115 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1116 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1117 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1118
1119 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1120 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1121
1122 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1123 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1124}
1125
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001126void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1127{
1128 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1129
1130 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1131 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1132
1133 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1134 CHECK_VALID_SIZE(inputs.size(), 2);
1135
1136 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1137 CHECK_VALID_SIZE(outputs.size(), 1);
1138
1139 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1140 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1141
1142 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1143 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1144
1145 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1146 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1147
1148 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1149 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1150 {
1151 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1152 }
1153 else
1154 {
1155 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1156 }
1157
1158 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1159
1160 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1161 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1162}
1163
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001164void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1165{
1166 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1167
1168 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1169 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1170
1171 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1172 CHECK_VALID_SIZE(inputs.size(), 2);
1173
1174 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1175 CHECK_VALID_SIZE(outputs.size(), 1);
1176
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001177 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1178 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1179
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001180 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1181 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1182
1183 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1184 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1185
1186 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001187 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1188 {
1189 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1190 }
1191 else
1192 {
1193 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1194 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001195
1196 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1197
1198 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1199 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1200}
1201
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001202void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1203{
1204 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1205
1206 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1207 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1208
1209 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1210 CHECK_VALID_SIZE(inputs.size(), 2);
1211
1212 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1213 CHECK_VALID_SIZE(outputs.size(), 1);
1214
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001215 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1216 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1217
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001218 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1219 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1220
1221 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1222 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1223
1224 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001225 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1226 {
1227 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1228 }
1229 else
1230 {
1231 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1232 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001233
1234 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1235
1236 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1237 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1238}
1239
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001240void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1241{
1242 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1243
1244 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1245
1246 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1247 CHECK_VALID_SIZE(outputs.size(), 1);
1248
1249 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1250 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1251
1252 armnn::MeanDescriptor desc;
1253 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1254 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1255 desc.m_Axis = axis;
1256
1257 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1258 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1259
1260 desc.m_KeepDims =
1261 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1262 true : false;
1263
1264 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1265 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1266
1267 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1268
1269 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1270 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1271
1272 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1273 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1274}
1275
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001276void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1277{
1278 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1279
1280 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1281
1282 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1283 CHECK_VALID_SIZE(outputs.size(), 1);
1284
1285 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1286 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1287
1288 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1289 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1290
1291 size_t step = 2;
1292 armnn::PadDescriptor desc;
1293 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1294 {
1295 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1296 }
1297
1298 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1299 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1300
1301 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1302 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1303
1304 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1305 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1306
1307 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1308 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1309}
1310
Finn Williamsc42c3842019-01-22 14:18:11 +00001311
Sadik Armagan58f39192018-09-17 14:14:39 +01001312void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1313{
Finn Williamsc42c3842019-01-22 14:18:11 +00001314 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001315}
1316
1317void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1318{
Finn Williamsc42c3842019-01-22 14:18:11 +00001319 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1320}
Sadik Armagan58f39192018-09-17 14:14:39 +01001321
Finn Williamsc42c3842019-01-22 14:18:11 +00001322void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1323{
1324 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1325}
1326
1327
1328void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1329{
1330 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001331 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1332 boost::ignore_unused(operatorPtr);
1333
1334 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1335 CHECK_VALID_SIZE(inputs.size(), 1);
1336
1337 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1338 CHECK_VALID_SIZE(outputs.size(), 1);
1339
Finn Williamsc42c3842019-01-22 14:18:11 +00001340 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001341 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001342 activationDesc.m_Function = activationType;
1343
1344 switch (activationType)
1345 {
1346 case ActivationFunction::ReLu:
1347 {
1348 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1349 break;
1350 }
1351 case ActivationFunction::BoundedReLu:
1352 {
1353 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1354 activationDesc.m_A = 6.0f;
1355 activationDesc.m_B = 0.0f;
1356 break;
1357 }
1358 case ActivationFunction::Sigmoid:
1359 {
1360 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1361 break;
1362 }
1363 default:
1364 {
1365 throw ParseException(
1366 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1367 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1368 }
1369 }
1370
1371 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1372
1373 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1374 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1375
1376 // register the input connection slots for the layer, connections are made after all layers have been created
1377 // only the tensors for the inputs are relevant, exclude the const tensors
1378 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1379 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1380
1381 // register the output connection slots for the layer, connections are made after all layers have been created
1382 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1383 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1384}
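// OutputShapeOfReshape resolves an optional -1 ("stretch") dimension in the requested shape.
// Worked example (illustrative): for an input of 24 elements and target dims {2, -1, 3}, the
// std::accumulate below starts from -1, so the product (-1) * 2 * (-1) * 3 = 6 gives the known
// element count; the stretch dimension becomes 24 / 6 = 4 and the resulting shape is [2, 4, 3].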
1385armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1386 const std::vector<int32_t> & targetDimsIn)
1387{
1388 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1389 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1390
1391 if (stretchDim != targetDimsIn.end())
1392 {
1393 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1394 {
1395 throw ParseException(
1396 boost::str(
1397 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1398 }
1399
1400 auto targetNumElements =
1401 boost::numeric_cast<unsigned int>(
1402 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1403
1404 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1405 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1406 }
1407
1408 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1409
1410 TensorInfo reshapeInfo = inputTensorInfo;
1411 reshapeInfo.SetShape(outputShape);
1412
1413 return reshapeInfo;
1414}
1415
1416void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1417{
1418 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1419
1420 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1421
1422 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1423 CHECK_VALID_SIZE(outputs.size(), 1);
1424
1425 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1426 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1427
1428 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1429    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1430    armnn::TensorInfo reshapeOutputTensorInfo =
1431        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1432
1433    // Check for valid input size and that reshape parameters equal output shape
1434    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1435    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
1436    {
1437 std::stringstream ss;
1438 ss << "New shape defined in reshape parameters "
1439           << reshapeOutputTensorShape
1440           << " does not equal output shape "
1441 << actualOutputTensorInfo.GetShape()
1442 << ": "
1443 << CHECK_LOCATION().AsString();
1444 throw ParseException(ss.str());
1445 }
1446
1447    ReshapeDescriptor reshapeDesc;
1448    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
1449
1450 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1451 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1452    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
1453
1454 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1455 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1456
1457 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1458 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1459}
1460
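// RESIZE_BILINEAR carries its target size as a constant second input: a two-element int32 tensor
// holding {newHeight, newWidth}. The values are copied out of the flatbuffer buffer and mapped to
// m_TargetHeight / m_TargetWidth, with the data layout fixed to NHWC as TF Lite uses.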
1461void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1462{
1463 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1464
1465 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1466 CHECK_VALID_SIZE(inputs.size(), 2);
1467
1468 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1469 CHECK_VALID_SIZE(outputs.size(), 1);
1470
1471 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1472
1473 // Data for the parsed tensor args (size) must be stored locally.
1474 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1475
1476 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1477 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1478
1479 ResizeBilinearDescriptor desc;
1480 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
1481 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1482 desc.m_DataLayout = armnn::DataLayout::NHWC;
1483
1484 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
1485 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
1486
1487 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1488 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1489
1490 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1491 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1492
1493 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1494 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1495}
1496
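// CONCATENATION: TF Lite allows a negative axis, so the axis is normalised below as
// (rank + axis) % rank. For example, axis -1 on rank-4 inputs gives (4 + -1) % 4 = 3, i.e. the
// innermost (channels) dimension. Each input view's origin is then accumulated into the
// OriginsDescriptor via armnnUtils::ProcessConcatInputTensorInfo.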
1497void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1498{
1499 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1500
1501 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1502 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1503
1504 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1505
1506 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1507 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1508 CHECK_VALID_SIZE(outputs.size(), 1);
1509
1510    unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1511    uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
1512
1513    const unsigned int concatDimInput = static_cast<unsigned int>(
1514        (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
1515
1516    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1517    concatDescriptor.SetConcatAxis(concatDimInput);
1518
1519    unsigned int mergeDimOrigin = 0;
1520
1521 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1522 {
1523 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1524
1525        // This sets up the concatDescriptor view origin
1526        armnnUtils::ProcessConcatInputTensorInfo(
1527            inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
1528    }
1529
1530 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
1531 IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());
1532
1533 BOOST_ASSERT(layer != nullptr);
1534
1535 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1536 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1537
1538    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1539
1540    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
1541
1542    // add fused activation layer
1543    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1544
1545    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1546 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1547}
1548
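// FULLY_CONNECTED: TF Lite is assumed here to store the weights as a 2-D tensor laid out as
// [outputSize, inputSize], which is why m_TransposeWeightMatrix is set to true below; weights of
// any other rank are rejected. An optional third input supplies the bias.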
1549void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1550{
1551 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1552
1553    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1554    const auto options = operatorPtr->builtin_options.AsFullyConnectedOptions();
1555
1556 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1557
1558 FullyConnectedDescriptor desc;
1559 desc.m_BiasEnabled = false;
1560    desc.m_TransposeWeightMatrix = true;
1561
1562 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1563 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1564 CHECK_VALID_SIZE(outputs.size(), 1);
1565
1566 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1567
1568 // Fully Connected Layer accepts two dimensional weights input
1569 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1570 if (weightsDimension != 2)
1571 {
1572 throw ParseException(
1573 boost::str(
1574 boost::format(
1575 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1576 "Node %2%")
1577 % weightsDimension
1578 % CHECK_LOCATION().AsString()));
1579 }
1580
1581    auto filterTensorAndData = CreateConstTensor(inputs[1],
1582 filterTensorInfo,
1583 armnn::Optional<armnn::PermutationVector&>());
1584    armnn::IConnectableLayer* layer;
1585 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1586
1587 if (inputs.size() == 3)
1588 {
1589 desc.m_BiasEnabled = true;
1590 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
1591        auto biasTensorAndData = CreateConstTensor(inputs[2],
1592 biasTensorInfo,
1593 armnn::Optional<armnn::PermutationVector&>());
1594        layer = m_Network->AddFullyConnectedLayer(desc,
1595 filterTensorAndData.first,
1596 biasTensorAndData.first,
1597 layerName.c_str());
1598 }
1599 else
1600 {
1601 layer = m_Network->AddFullyConnectedLayer(desc,
1602 filterTensorAndData.first,
1603 layerName.c_str());
1604 }
1605 BOOST_ASSERT(layer != nullptr);
1606
1607 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1608 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1609
1610 // register the input connection slot for the layer
1611 // only the tensors for the inputs are relevant, exclude the const tensors
1612 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1613 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1614
1615    // Add the fused activation layer; activation functions are applied element-wise, so the data layout does not matter here
1616 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1617 options->fused_activation_function);
1618 // register the output connection slots for the layer, connections are made after all layers have been created
1619 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1620 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1621}
1622
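// TFLite_Detection_PostProcess is a custom operator, so its attributes arrive as a flexbuffers map
// in custom_options rather than as builtin options. The four outputs registered below are, in the
// order TF Lite is expected to emit them, the detection boxes, classes, scores and the number of
// valid detections.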
1623void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1624{
1625 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1626
1627 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1628
1629 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1630 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1631 CHECK_VALID_SIZE(outputs.size(), 4);
1632
1633 // Obtain custom options from flexbuffers
1634 auto custom_options = operatorPtr->custom_options;
1635 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1636
1637 // Obtain descriptor information from tf lite
1638 DetectionPostProcessDescriptor desc;
1639 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1640 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1641 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1642 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1643 desc.m_NumClasses = m["num_classes"].AsUInt32();
1644 desc.m_ScaleH = m["h_scale"].AsFloat();
1645 desc.m_ScaleW = m["w_scale"].AsFloat();
1646 desc.m_ScaleX = m["x_scale"].AsFloat();
1647 desc.m_ScaleY = m["y_scale"].AsFloat();
1648
1649 if (!(m["use_regular_non_max_suppression"].IsNull()))
1650 {
1651 desc.m_UseRegularNms = m["use_regular_non_max_suppression"].AsBool();
1652 }
1653 if (!(m["detections_per_class"].IsNull()))
1654 {
1655 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1656 }
1657
1658 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1659 {
1660 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1661 "must be positive and less than or equal to 1.");
1662 }
1663
1664 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
1665 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
1666 armnn::Optional<armnn::PermutationVector&>());
1667
1668 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
1669 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
1670 layerName.c_str());
1671
1672 BOOST_ASSERT(layer != nullptr);
1673
1674 // Register outputs
1675 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
1676 {
1677 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i]);
1678 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
1679 }
1680
1681 // Register the input connection slots for the layer, connections are made after all layers have been created
1682 // only the tensors for the inputs are relevant, exclude the const tensors
1683 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1684 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1685
1686 // Register the output connection slots for the layer, connections are made after all layers have been created
1687 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1688 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
1689 outputTensorIndexes[1],
1690 outputTensorIndexes[2],
1691 outputTensorIndexes[3]});
1692}
1693
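// example (illustrative): an operator with fused_activation_function == RELU6 ends up as the
// original layer followed by an Activation layer (BoundedReLu, m_A = 6.0f, m_B = 0.0f) named
// "<previousLayerName>:RELU6"; ActivationFunctionType_NONE simply returns the previous layer.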
1694armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
1695                                                                unsigned int outputSlot,
1696                                                                tflite::ActivationFunctionType activationType)
1697{
1698 ActivationDescriptor activationDesc;
1699 std::string layerName = prevLayer->GetName();
1700
1701 switch(activationType)
1702 {
1703 case tflite::ActivationFunctionType_NONE:
1704 {
1705 // this is a no-op: return previous layer
1706 return prevLayer;
1707 }
1708 case tflite::ActivationFunctionType_RELU:
1709 {
1710 activationDesc.m_Function = ActivationFunction::ReLu;
1711 layerName += ":RELU";
1712 break;
1713 }
1714 case tflite::ActivationFunctionType_RELU6:
1715 {
1716 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1717 activationDesc.m_A = 6.0f;
1718 activationDesc.m_B = 0.0f;
1719 layerName += ":RELU6";
1720 break;
1721 }
1722 case tflite::ActivationFunctionType_TANH:
1723 {
1724 activationDesc.m_Function = ActivationFunction::TanH;
1725 activationDesc.m_A = 1.0f;
1726 activationDesc.m_B = 1.0f;
1727 layerName += ":TANH";
1728 break;
1729 }
1730
1731        // These are listed here only as a reminder of the other fused activations we could support
1732 case tflite::ActivationFunctionType_RELU_N1_TO_1:
1733 case tflite::ActivationFunctionType_SIGN_BIT:
1734 default:
1735 {
1736 throw ParseException(
1737 boost::str(
1738                    boost::format("TfLite parser doesn't support fused activation: "
1739 "%1%/%2% %3% ") %
1740 activationType %
1741 tflite::EnumNameActivationFunctionType(activationType) %
1742 CHECK_LOCATION().AsString()));
1743
1744 }
1745 }
1746
1747 IConnectableLayer* activationLayer =
1748 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1749
1750 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
1751 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
1752 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
1753 return activationLayer;
1754}
1755
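// example usage (file name is illustrative, and assumes the member is static as declared in the
// header):
//   TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile("mobilenet_v1.tflite");
// LoadModelFromFile reads the whole file into memory and delegates to LoadModelFromBinary, which
// verifies the flatbuffer before unpacking it.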
1756TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
1757{
1758 if (fileName == nullptr)
1759 {
1760 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
1761 CHECK_LOCATION().AsString()));
1762 }
1763 boost::system::error_code errorCode;
1764 boost::filesystem::path pathToFile(fileName);
1765 if (!boost::filesystem::exists(pathToFile, errorCode))
1766 {
1767 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
1768 fileName %
1769 errorCode %
1770 CHECK_LOCATION().AsString()));
1771 }
1772 std::ifstream file(fileName, std::ios::binary);
1773 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
1774 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
1775 fileContent.size());
1776}
1777
1778TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
1779{
1780 if (binaryContent == nullptr)
1781 {
1782 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
1783 CHECK_LOCATION().AsString()));
1784 }
1785 flatbuffers::Verifier verifier(binaryContent, len);
1786 if (verifier.VerifyBuffer<tflite::Model>() == false)
1787 {
1788 throw ParseException(
1789 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
1790 "flatbuffers format. size:%1% %2%") %
1791 len %
1792 CHECK_LOCATION().AsString()));
1793 }
1794 return tflite::UnPackModel(binaryContent);
1795}
1796
1797TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
1798 size_t subgraphIndex,
1799 size_t operatorIndex)
1800{
1801 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1802
1803 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1804 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1805
1806 size_t inputCount = operatorPtr->inputs.size();
1807 TensorRawPtrVector result(inputCount);
1808 for (size_t i=0; i<inputCount; ++i)
1809 {
1810 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
1811 result[i] = subGraphPtr->tensors[inputId].get();
1812 }
1813 return result;
1814}
1815
1816TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
1817 size_t subgraphIndex,
1818 size_t operatorIndex)
1819{
1820 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1821
1822 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1823 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1824
1825 size_t outputCount = operatorPtr->outputs.size();
1826 TensorRawPtrVector result(outputCount);
1827 for (size_t i=0; i<outputCount; ++i)
1828 {
1829 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
1830 CHECK_TENSOR(model, subgraphIndex, outputId);
1831 result[i] = subGraphPtr->tensors[outputId].get();
1832 }
1833 return result;
1834}
1835
1836TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
1837 size_t subgraphIndex)
1838{
1839 CHECK_SUBGRAPH(model, subgraphIndex);
1840 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1841
1842 size_t inputCount = subGraphPtr->inputs.size();
1843 TensorIdRawPtrVector result(inputCount);
1844 for (size_t i=0; i<inputCount; ++i)
1845 {
1846 uint32_t inputId = CHECKED_NON_NEGATIVE(subGraphPtr->inputs[i]);
1847 CHECK_TENSOR(model, subgraphIndex, inputId);
1848 result[i] = std::make_pair(inputId, subGraphPtr->tensors[inputId].get());
1849 }
1850 return result;
1851}
1852
1853TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
1854 size_t subgraphIndex)
1855{
1856 CHECK_SUBGRAPH(model, subgraphIndex);
1857 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1858
1859 size_t outputCount = subGraphPtr->outputs.size();
1860 TensorIdRawPtrVector result(outputCount);
1861 for (size_t i=0; i<outputCount; ++i)
1862 {
1863 uint32_t outputId = CHECKED_NON_NEGATIVE(subGraphPtr->outputs[i]);
1864 result[i] = std::make_pair(outputId, subGraphPtr->tensors[outputId].get());
1865 }
1866 return result;
1867}
1868
1869std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
1870 size_t subgraphIndex,
1871 size_t operatorIndex)
1872{
1873 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1874 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1875 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1876 return operatorPtr->inputs;
1877}
1878
1879std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
1880 size_t subgraphIndex,
1881 size_t operatorIndex)
1882{
1883 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1884 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1885 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1886 return operatorPtr->outputs;
1887}
1888
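// RegisterInputSlots / RegisterOutputSlots only record which tensor index feeds, or is produced
// by, each layer slot; the actual slot connections are made later, once every layer in the
// subgraph has been created, so operator ordering in the flatbuffer does not matter.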
1889void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
1890 size_t operatorIndex,
1891 IConnectableLayer* layer,
1892 const std::vector<unsigned int>& tensorIndexes)
1893{
1894 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1895 BOOST_ASSERT(layer != nullptr);
1896 if (tensorIndexes.size() != layer->GetNumInputSlots())
1897 {
1898 throw ParseException(
1899 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
1900 " for subgraph:%3% operator index:%4% %5%") %
1901 tensorIndexes.size() %
1902 layer->GetNumInputSlots() %
1903 subgraphIndex %
1904 operatorIndex %
1905 CHECK_LOCATION().AsString()));
1906 }
1907
1908 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
1909 {
1910 unsigned int tensorIndex = tensorIndexes[slotIndex];
1911 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
1912 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
1913 }
1914}
1915
1916void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
1917 size_t operatorIndex,
1918 IConnectableLayer* layer,
1919 const std::vector<unsigned int>& tensorIndexes)
1920{
1921 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1922 BOOST_ASSERT(layer != nullptr);
1923 if (tensorIndexes.size() != layer->GetNumOutputSlots())
1924 {
1925 throw ParseException(
1926 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
1927 " for subgraph:%3% operator index:%4% %5%") %
1928 tensorIndexes.size() %
1929 layer->GetNumOutputSlots() %
1930 subgraphIndex %
1931 operatorIndex %
1932 CHECK_LOCATION().AsString()));
1933 }
1934
1935 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1936 {
1937 unsigned int tensorIndex = tensorIndexes[slotIndex];
1938 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
1939 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
1940 }
1941}
1942
1943void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
1944{
1945 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1946
1947 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
1948 for (auto const & tensorIdAndPtr : inputs)
1949 {
1950 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1951 IConnectableLayer* layer =
1952 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1953
1954 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
1955 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1956
1957 RegisterOutputSlots(subgraphIndex,
1958 VIRTUAL_OPERATOR_ID,
1959 layer,
1960 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1961 }
1962}
1963
1964void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
1965{
1966 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1967
1968 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
1969 for (auto const & tensorIdAndPtr : outputs)
1970 {
1971 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1972 IConnectableLayer* layer =
1973 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1974
1975 RegisterInputSlots(subgraphIndex,
1976 VIRTUAL_OPERATOR_ID,
1977 layer,
1978 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1979 }
1980}
1981
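// SetupConstantLayers scans the recorded connections: any tensor that has registered consumers but
// no producer slot by this point is treated as constant data in the flatbuffer and materialised as
// a ConstantLayer feeding those consumers.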
1982void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
1983{
1984 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1985
1986 const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
1987 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
1988 {
1989 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
1990 {
1991 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
1992 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
1993 {
1994 TensorRawPtr tensorPtr = subGraphPtr->tensors[tensorIndex].get();
1995 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
1996 auto tensorAndData = CreateConstTensor(tensorPtr,
1997 tensorInfo,
1998 armnn::Optional<armnn::PermutationVector&>());
1999
2000 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2001 IConnectableLayer *layer =
2002 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2003
2004 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2005 RegisterOutputSlots(subgraphIndex,
2006 VIRTUAL_OPERATOR_ID,
2007 layer,
2008 { tensorIndex });
2009
2010 }
2011 }
2012 }
2013}
2014
2015// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2016TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2017{
2018 CHECK_BUFFER(model, bufferIndex);
2019 return model->buffers[bufferIndex].get();
2020}
2021
2022template<typename T>
2023std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2024TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2025 TfLiteParser::TensorRawPtr tensorPtr,
2026 armnn::TensorInfo& tensorInfo,
2027 armnn::Optional<armnn::PermutationVector&> permutationVector)
2028{
2029 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2030 tensorPtr,
2031 tensorInfo,
2032 permutationVector);
2033 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2034 return std::make_pair(constData.first, std::move(storage));
2035}
2036
2037std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2038TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
2039                                armnn::TensorInfo& tensorInfo,
2040                                armnn::Optional<armnn::PermutationVector&> permutationVector)
2041{
2042 CHECK_TENSOR_PTR(tensorPtr);
2043 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2044 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2045
2046 switch (tensorInfo.GetDataType())
2047 {
2048 case armnn::DataType::Float32:
2049            return CreateConstTensorAndStoreData<float>(bufferPtr,
2050                                                        tensorPtr,
2051                                                        tensorInfo,
2052                                                        permutationVector);
2053        case armnn::DataType::QuantisedAsymm8:
2054            return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2055                                                          tensorPtr,
2056                                                          tensorInfo,
2057                                                          permutationVector);
2058        case armnn::DataType::Signed32:
2059            return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2060                                                          tensorPtr,
2061                                                          tensorInfo,
2062                                                          permutationVector);
2063        default:
2064 {
2065 std::stringstream errString;
2066 errString << "Unexpected datatype when creating const tensor: "
2067 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2068 << " shape:" << tensorInfo.GetShape()
2069 << CHECK_LOCATION().AsString();
2070 throw ParseException(errString.str());
2071 }
2072 }
2073}
2074
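// example usage (tensor name is illustrative):
//   BindingPointInfo inputBinding = parser.GetNetworkInputBindingInfo(0, "input");
//   armnn::TensorInfo inputInfo = inputBinding.second;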
2075BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2076 const std::string& name) const
2077{
2078 CHECK_SUBGRAPH(m_Model, subgraphId);
2079 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2080 for (auto const & input : inputs)
2081 {
2082 if (input.second->name == name)
2083 {
2084 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2085 return std::make_pair(bindingId, ToTensorInfo(input.second));
2086 }
2087 }
2088
2089 std::stringstream bindings;
2090 for (auto const & input : inputs)
2091 {
2092 bindings << "'" << input.second->name << "' ";
2093 }
2094
2095 throw ParseException(
2096 boost::str(
2097 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2098 "Possible inputs are: [%3%] %4%") %
2099 subgraphId %
2100 name %
2101 bindings.str() %
2102 CHECK_LOCATION().AsString()));
2103}
2104
2105BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2106 const std::string& name) const
2107{
2108 CHECK_SUBGRAPH(m_Model, subgraphId);
2109 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2110 for (auto const & output : outputs)
2111 {
2112 if (output.second->name == name)
2113 {
2114 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
2115 return std::make_pair(bindingId, ToTensorInfo(output.second));
2116 }
2117 }
2118
2119 std::stringstream bindings;
2120 for (auto const & output : outputs)
2121 {
2122 bindings << "'" << output.second->name << "' ";
2123 }
2124
2125 throw ParseException(
2126 boost::str(
2127 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2128 "Possible outputs are: [%3%] %4%") %
2129 subgraphId %
2130 name %
2131 bindings.str() %
2132 CHECK_LOCATION().AsString()));
2133}
2134
2135size_t TfLiteParser::GetSubgraphCount() const
2136{
2137 return m_Model->subgraphs.size();
2138}
2139
2140std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2141{
2142 CHECK_SUBGRAPH(m_Model, subgraphId);
2143 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2144 std::vector<std::string> result;
2145 result.reserve(inputs.size());
2146 for (auto const & input : inputs)
2147 {
2148 result.push_back(input.second->name);
2149 }
2150 return result;
2151}
2152
2153std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2154{
2155 CHECK_SUBGRAPH(m_Model, subgraphId);
2156 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2157 std::vector<std::string> result;
2158 result.reserve(outputs.size());
2159 for (auto const & output : outputs)
2160 {
2161 result.push_back(output.second->name);
2162 }
2163 return result;
2164}
2165
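// example usage:
//   armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
// the ITfLiteParserPtr returned by Create() releases the parser through ITfLiteParser::Destroy
// when it goes out of scope.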
2166ITfLiteParser* ITfLiteParser::CreateRaw()
2167{
2168 return new TfLiteParser();
2169}
2170
2171ITfLiteParserPtr ITfLiteParser::Create()
2172{
2173 return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
2174}
2175
2176void ITfLiteParser::Destroy(ITfLiteParser* parser)
2177{
2178 delete parser;
2179}
2180
2181TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
2182: m_FloatData(std::move(data))
2183, m_Uint8Data(nullptr)
2184, m_Int32Data(nullptr)
2185{
2186}
2187
2188TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2189: m_FloatData(nullptr)
2190, m_Uint8Data(std::move(data))
2191, m_Int32Data(nullptr)
2192{
2193}
2194
2195TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2196: m_FloatData(nullptr)
2197, m_Uint8Data(nullptr)
2198, m_Int32Data(std::move(data))
2199{
2200}
2201
2202} // armnnTfLiteParser