1//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5#include "TfLiteParser.hpp"
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Exceptions.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <boost/filesystem.hpp>
11
12// armnnUtils:
13#include <ParserHelper.hpp>
14#include <Permute.hpp>
15#include <VerificationHelpers.hpp>
16
17// The generated code based on the Tf Lite schema:
18#include <schema_generated.h>
19
20#include <boost/core/ignore_unused.hpp>
21#include <boost/assert.hpp>
22#include <boost/format.hpp>
23#include <boost/log/trivial.hpp>
24
25#include <fstream>
26#include <algorithm>
27#include <limits>
28#include <numeric>
29#include <flatbuffers/flexbuffers.h>
30
31using namespace armnn;
32using armnn::CheckLocation;
33namespace armnnTfLiteParser
34{
35namespace
36{
37
38const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
39
40void CheckSubgraph(const TfLiteParser::ModelPtr & model,
41 size_t subgraphIndex,
42 const CheckLocation & location)
43{
44 if (model.get() == nullptr)
45 {
46 throw ParseException(
47 boost::str(
48 boost::format("%1% was called with invalid (null) model. "
49 "Possible reason is that the model is not yet loaded and Unpack(ed). "
50 "subgraph:%2% at %3%") %
51 location.m_Function %
52 subgraphIndex %
53 location.FileLine()));
54 }
55 else if (subgraphIndex >= model->subgraphs.size())
56 {
57 throw ParseException(
58 boost::str(
59 boost::format("%1% was called with an invalid subgraph index. "
60 "subgraph:%2% at %3%") %
61 location.m_Function %
62 subgraphIndex %
63 location.FileLine()));
64 }
65}
66
67#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
68 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
69
70void CheckModel(const TfLiteParser::ModelPtr & model,
71 size_t subgraphIndex,
72 size_t operatorIndex,
73 const CheckLocation & location)
74{
75 if (model.get() == nullptr)
76 {
77 throw ParseException(
78 boost::str(
79 boost::format("%1% was called with invalid (null) model. "
80 "Possible reason is that the model is not yet loaded and Unpack(ed). "
81 "subgraph:%2% operator:%3% at %4%") %
82 location.m_Function %
83 subgraphIndex %
84 operatorIndex %
85 location.FileLine()));
86 }
87 else if (subgraphIndex >= model->subgraphs.size())
88 {
89 throw ParseException(
90 boost::str(
91 boost::format("%1% was called with an invalid subgraph index. "
92 "subgraph:%2% operator:%3% at %4%") %
93 location.m_Function %
94 subgraphIndex %
95 operatorIndex %
96 location.FileLine()));
97 }
98 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
99 operatorIndex != VIRTUAL_OPERATOR_ID)
100 {
101 throw ParseException(
102 boost::str(
103 boost::format("%1% was called with an invalid operator index. "
104 "subgraph:%2% operator:%3% at %4%") %
105 location.m_Function %
106 subgraphIndex %
107 operatorIndex %
108 location.FileLine()));
109 }
110}
111
112#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
113 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
114
115void CheckTensor(const TfLiteParser::ModelPtr & model,
116 size_t subgraphIndex,
117 size_t tensorIndex,
118 const CheckLocation & location)
119{
120    // Not checking the model here because CHECK_MODEL is assumed to have already
121    // run and validated it, so an assert is sufficient.
122 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
123
124    // Likewise the subgraph index should already have been checked by CHECK_MODEL,
125    // so only an assert is added here.
126 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
127
128 // the tensor index is the only one to check here
129 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
130 {
131 throw ParseException(
132 boost::str(
133 boost::format("%1% was called with an invalid tensor index. "
134 "subgraph:%2% tensor:%3% at %4%") %
135 location.m_Function %
136 subgraphIndex %
137 tensorIndex %
138 location.FileLine()));
139 }
140}
141
142#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
143 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
144
145void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
146 const CheckLocation & location)
147{
148 if (rawPtr == nullptr)
149 {
150 throw ParseException(
151 boost::str(
152 boost::format("%1% was called with a null tensor pointer. "
153 "at %2%") %
154 location.m_Function %
155 location.FileLine()));
156
157 }
158}
159
160#define CHECK_TENSOR_PTR(TENSOR_PTR) \
161 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
162
163void CheckBuffer(const TfLiteParser::ModelPtr & model,
164 size_t bufferIndex,
165 const CheckLocation & location)
166{
167 if (model.get() == nullptr)
168 {
169 throw ParseException(
170 boost::str(
171 boost::format("%1% was called with invalid (null) model. "
172 "Possible reason is that the model is not yet loaded and Unpack(ed). "
173 "buffer:%2% at %3%") %
174 location.m_Function %
175 bufferIndex %
176 location.FileLine()));
177 }
178 else if (bufferIndex >= model->buffers.size())
179 {
180 throw ParseException(
181 boost::str(
182 boost::format("%1% was called with an invalid buffer index. "
183 "buffer index:%2% at %3%") %
184 location.m_Function %
185 bufferIndex %
186 location.FileLine()));
187 }
188 else if (model->buffers[bufferIndex].get() == nullptr)
189 {
190 throw ParseException(
191 boost::str(
192                boost::format("The buffer #%1% is null. %2%") %
193 bufferIndex %
194 location.AsString()));
195 }
196}
197
198#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
199 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
200
201void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
202 const armnn::TensorInfo & tensorInfo,
203 uint32_t bufferId,
204 const CheckLocation & location)
205{
206 if (bufferPtr == nullptr)
207 {
208 throw ParseException(
209 boost::str(
210 boost::format("BufferPtr is null for buffer:%1%. %2%") %
211 bufferId %
212 location.AsString()));
213 }
214 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
215 tensorInfo.GetNumBytes() > bufferPtr->data.size())
216 {
217 std::stringstream ss;
218 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
219 << "For tensor: " << tensorInfo.GetShape()
220 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
221 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
222 throw ParseException(ss.str());
223 }
224}
225
226#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
227 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
228
229bool IsActivationSupported(tflite::ActivationFunctionType activationType)
230{
231 switch(activationType)
232 {
233 case tflite::ActivationFunctionType_NONE:
234 case tflite::ActivationFunctionType_RELU:
235 case tflite::ActivationFunctionType_RELU6:
236 case tflite::ActivationFunctionType_TANH:
237 {
238 return true;
239 }
240 default:
241 {
242 return false;
243 }
244 }
245}
246
247#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
248 do { \
249 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
250 { \
251 throw ParseException( \
252 boost::str( \
253                    boost::format("TfLite parser doesn't support fused activation: " \
254 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
255 OPTION->fused_activation_function % \
256 tflite::EnumNameActivationFunctionType(\
257 OPTION->fused_activation_function) % \
258 __func__ % \
259 SUBGRAPH_INDEX % \
260 OPERATOR_INDEX % \
261 CHECK_LOCATION().FileLine())); \
262 } \
263 } while(false)
264
265
266std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
267{
268 std::vector<unsigned int> result;
269 result.reserve(in.size());
270 for (auto & i : in)
271 {
272 result.push_back(CHECKED_NON_NEGATIVE(i));
273 }
274 return result;
275}
276
277void CalcPadding(uint32_t inputSize,
278 uint32_t filterSize,
279 uint32_t stride,
280 uint32_t& paddingFront,
281 uint32_t& paddingBack,
282 tflite::Padding padding)
283{
284 paddingFront = 0;
285 paddingBack = 0;
286 if (padding == tflite::Padding_SAME)
287 {
288 uint32_t outputSize = (inputSize + stride - 1) / stride;
289 uint32_t temp = (outputSize - 1) * stride + filterSize;
290 if (temp > inputSize)
291 {
292 paddingFront = (temp - inputSize) / 2;
293 paddingBack = (temp - inputSize) - paddingFront;
294 }
295 }
296}
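// Illustrative worked example (editorial annotation, not part of the parser) of the SAME
// padding maths above, assuming inputSize = 5, filterSize = 3, stride = 2:
//   outputSize   = (5 + 2 - 1) / 2 = 3
//   temp         = (3 - 1) * 2 + 3 = 7
//   temp > inputSize, so total padding = 7 - 5 = 2
//   paddingFront = 1, paddingBack = 1
// With VALID padding both values stay 0.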
297
298armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
299{
300 armnn::DataType type;
301 CHECK_TENSOR_PTR(tensorPtr);
302
303 switch (tensorPtr->type)
304 {
305 case tflite::TensorType_UINT8:
306 type = armnn::DataType::QuantisedAsymm8;
307 break;
308 case tflite::TensorType_FLOAT32:
309 type = armnn::DataType::Float32;
310 break;
311 case tflite::TensorType_INT32:
312 type = armnn::DataType::Signed32;
313 break;
314
315 default:
316 {
317 CheckLocation location = CHECK_LOCATION();
318 throw ParseException(
319 boost::str(
320 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
321 tensorPtr->type %
322 tflite::EnumNameTensorType(tensorPtr->type) %
323 tensorPtr->name %
324 location.AsString()));
325 }
326 }
327
328 float quantizationScale = 0.0f;
329 int32_t quantizationOffset = 0;
330
331 if (tensorPtr->quantization.get())
332 {
333 CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
334 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
335
336 if (tensorPtr->quantization->scale.size() == 1)
337 {
338 quantizationScale = tensorPtr->quantization->scale[0];
339 }
340 if (tensorPtr->quantization->zero_point.size() == 1)
341 {
342 // NOTE: we lose precision here when converting from 64 bit to 32
343            // but this is what we support at the moment in ArmNN
344 quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
345 }
346 }
347
348 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
349
350 // two statements (on purpose) for easier debugging:
351 armnn::TensorInfo result(static_cast<unsigned int>(tensorPtr->shape.size()),
352 dimensions.data(),
353 type,
354 quantizationScale,
355 quantizationOffset);
356 return result;
357}
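// Illustrative note (editorial annotation, not part of the parser): for a QuantisedAsymm8
// tensor the scale and zero point recovered above relate real and quantized values as
// real = scale * (quantized - offset). For example, with scale = 0.1 and offset = 128,
// the quantized value 130 represents 0.1 * (130 - 128) = 0.2.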
358
359template<typename T>
360std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
361CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
362 TfLiteParser::TensorRawPtr tensorPtr,
363                      armnn::TensorInfo& tensorInfo,
364 armnn::Optional<armnn::PermutationVector&> permutationVector)
365{
366 BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
367 BOOST_ASSERT_MSG(bufferPtr != nullptr,
368 boost::str(
369 boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
370
371 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
372
373 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
374 {
375 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
376        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
377 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
378    }
379 else
380 {
381 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
382 }
383
384    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
385}
386
387armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
388{
389    // Generate the binding id by shifting the tensor id left by 8 bits
390    // and adding the subgraph id, which allows up to 256 subgraphs
391 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
392}
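// Illustrative example (editorial annotation, not part of the parser): with subgraphIndex = 0
// and tensorIndex = 5 the binding id is (5 << 8) + 0 = 1280. Given the packing above, and
// assuming subgraphIndex < 256, the indices can be recovered as:
//   subgraphIndex = bindingId & 0xFF;
//   tensorIndex   = bindingId >> 8;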
393
394bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
395{
396 const unsigned int actualSize = actual.GetNumDimensions();
397 if (actualSize != expected.size())
398 {
399 return false;
400 }
401
402 for (unsigned int i = 0u; i < actualSize; i++)
403 {
404 if (expected[i] < 0 ||
405 actual[i] != static_cast<unsigned int>(expected[i]))
406 {
407 return false;
408 }
409 }
410
411 return true;
412}
413
414} // <anonymous>
415
416TfLiteParser::TfLiteParser()
417: m_Network(nullptr, nullptr)
418, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
419{
420 // register supported operators
421 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
422    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
423    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
424    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
425    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
426    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
427    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
428    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
429    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
430    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
431    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
432    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
433    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
434    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
435    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
436    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
437    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
438    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
439    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
440    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
441    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
442    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
443    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
444    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
445}
446
447void TfLiteParser::ResetParser()
448{
449 m_Network = armnn::INetworkPtr(nullptr, nullptr);
450 m_Model = nullptr;
451 m_SubgraphConnections.clear();
452}
453
454void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
455 size_t operatorIndex,
456 IConnectableLayer *layer)
457{
458 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
459 BOOST_ASSERT(layer != nullptr);
460
461 const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
462 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
463
464 BOOST_ASSERT(operatorPtr->inputs.size() > 1);
465
466 uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
467 TensorRawPtr tensorPtr = subGraphPtr->tensors[reshapedInputId].get();
468 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
469 TensorRawPtr tensorPtr1 = subGraphPtr->tensors[inputId].get();
470
471 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
472 armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);
473
474 if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
475 {
476 uint32_t id = reshapedInputId;
477 reshapedInputId = inputId;
478 inputId = id;
479
480 reshapedTensorInfo = ToTensorInfo(tensorPtr1);
481 inputTensorInfo = ToTensorInfo(tensorPtr);
482 }
483
484 uint32_t numDimensions = inputTensorInfo.GetNumDimensions();
485
486 std::vector<unsigned> reshapedDim;
487 for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
488 {
489 reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
490 }
491
492 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
493 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
494
495 reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
496
497 std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
498 armnn::ReshapeDescriptor desc;
499 desc.m_TargetShape = reshapedTensorInfo.GetShape();
500 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
501
502 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
503 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
504
505 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
506
507 armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
508 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
509}
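// Illustrative example (editorial annotation, not part of the parser): broadcasting an Add
// with operand shapes [1, 2, 3, 4] and [3, 4]. The lower-rank operand's shape is left-padded
// with 1s up to the higher rank, i.e. [3, 4] -> [1, 1, 3, 4], and the inserted Reshape layer
// is wired in front of the elementwise layer so that both inputs have the same number of
// dimensions.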
510
511INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
512{
513 ResetParser();
514 m_Model = LoadModelFromFile(graphFile);
515 return CreateNetworkFromModel();
516}
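// Minimal usage sketch (editorial annotation, assuming the public ITfLiteParser API and a
// hypothetical "model.tflite" file and input tensor name):
//   auto parser = armnnTfLiteParser::ITfLiteParser::Create();
//   armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//   auto inputBinding = parser->GetNetworkInputBindingInfo(0, "input_tensor_name");
// The returned INetwork is then optimised and loaded through the usual armnn::IRuntime flow.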
517
518INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
519{
520 ResetParser();
521 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
522 return CreateNetworkFromModel();
523}
524
525INetworkPtr TfLiteParser::CreateNetworkFromModel()
526{
527 m_Network = INetwork::Create();
528 BOOST_ASSERT(m_Model.get() != nullptr);
529
530 bool failedToCreate = false;
531 std::stringstream errors;
532
533 if (m_Model->subgraphs.size() != 1)
534 {
535 throw ParseException(
536 boost::str(
537 boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
538 m_Model->subgraphs.size() %
539 CHECK_LOCATION().AsString()));
540 }
541
542 size_t subgraphIndex = 0;
543 for (SubGraphPtr const & subgraph : m_Model->subgraphs)
544 {
545 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
546
547 size_t operatorIndex = 0;
548 for (OperatorPtr const & op : subgraph->operators)
549 {
550 try
551 {
552                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
553 auto builtinCode = opCodePtr->builtin_code;
554
555 if (builtinCode > tflite::BuiltinOperator_MAX)
556 {
557 throw ParseException(
558 boost::str(
559 boost::format("Operator code %1% is out of range 0-%2%. "
560 "subgraph:%3% operator idx:%4%. %5%") %
561 builtinCode %
562 tflite::BuiltinOperator_MAX %
563 subgraphIndex %
564 operatorIndex %
565 CHECK_LOCATION().AsString()));
566 }
567
568 // lookup and call the parser function
569 auto & parserFunction = m_ParserFunctions[builtinCode];
570 (this->*parserFunction)(subgraphIndex, operatorIndex);
571 }
572 catch (const ParseException& e)
573 {
574 failedToCreate = true;
575 std::stringstream errorString;
576
577 errorString << "Failed to parse operator #" << operatorIndex
578 << " within subgraph #" << subgraphIndex
579 << " error: " << e.what();
580 BOOST_LOG_TRIVIAL(error) << errorString.str();
581
582 errors << errorString.str() << "\n";
583 }
584 ++operatorIndex;
585 }
586
587 SetupInputLayers(subgraphIndex);
588 SetupOutputLayers(subgraphIndex);
589        SetupConstantLayers(subgraphIndex);
590
591 ++subgraphIndex;
592 }
593
594 if (failedToCreate)
595 {
596 // we can skip everything and let the outer exception handler deal with the error
597 throw ParseException(errors.str());
598 }
599
600 // establish the connections from the layer outputs to the inputs of the subsequent layers
601 for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
602 {
603 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
604 {
605 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
606 {
607 for (size_t inputSlotIdx = 0;
608 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
609 ++inputSlotIdx)
610 {
611 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
612 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
613 }
614 }
615 }
616 }
617
618 return std::move(m_Network);
619}
620
621void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
622 size_t tensorIndex,
623 armnn::IOutputSlot* slot)
624{
625 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
626 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
627 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
628
629 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
630
631 // assuming there is only one producer for that tensor
632 if (tensorSlots.outputSlot != nullptr)
633 {
634 throw ParseException(boost::str(
635 boost::format("Another layer has already registered itself as the producer of "
636 "subgraph:%1% tensor:%2% %3%") %
637 subgraphIndex %
638 tensorIndex %
639 CHECK_LOCATION().AsString()));
640 }
641
642 tensorSlots.outputSlot = slot;
643}
644
645void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
646 size_t tensorIndex,
647 armnn::IInputSlot* slot)
648{
649 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
650 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
651 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
652
653 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
654 tensorSlots.inputSlots.push_back(slot);
655}
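// Illustrative note (editorial annotation, not part of the parser): producers and consumers
// are only recorded here; the actual wiring happens at the end of CreateNetworkFromModel.
// Conceptually, for each recorded tensor slot entry:
//   if (slots.outputSlot != nullptr)
//   {
//       for (armnn::IInputSlot* consumer : slots.inputSlots)
//       {
//           slots.outputSlot->Connect(*consumer);
//       }
//   }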
656
657void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
658{
659 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
660 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
661 //
662 auto opcodeIndex = operatorPtr->opcode_index;
663 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
664
665 throw ParseException(
666 boost::str(
667 boost::format("Operator not supported. "
668 "subgraph:%1% operator:%2% "
669 "opcode_index:%3% opcode:%4% / %5% %6%") %
670 subgraphIndex %
671 operatorIndex %
672 opcodeIndex %
673 opcode %
674 tflite::EnumNameBuiltinOperator(opcode) %
675 CHECK_LOCATION().AsString()));
676}
677
678void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
679{
680 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
681
682 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
683 const auto * options = operatorPtr->builtin_options.AsConv2DOptions();
684
685 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
686
687 Convolution2dDescriptor desc;
688 desc.m_BiasEnabled = false;
689 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
690 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
691    desc.m_DataLayout = armnn::DataLayout::NHWC;
692
693 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
694 CHECK_VALID_SIZE(inputs.size(), 2, 3);
695
696 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
697 CHECK_VALID_SIZE(outputs.size(), 1);
698
699 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
700 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
701
702 // assuming input is NHWC
703 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
704 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
705
706 // assuming the filter is OHWI : Output, H, W, Input
707 // which is essentially the same as NHWC
708 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
709 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
710
711 CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
712 CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
713
714    auto filterTensorAndData = CreateConstTensor(inputs[1],
715 filterTensorInfo,
716 armnn::Optional<armnn::PermutationVector&>());
717    armnn::IConnectableLayer* layer;
718
719 auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
720
721 if (inputs.size() == 3)
722 {
723 desc.m_BiasEnabled = true;
724 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
725        auto biasTensorAndData = CreateConstTensor(inputs[2],
726 biasTensorInfo,
727 armnn::Optional<armnn::PermutationVector&>());
728        layer = m_Network->AddConvolution2dLayer(desc,
729 filterTensorAndData.first,
730 biasTensorAndData.first,
731 layerName.c_str());
732 }
733 else
734 {
735 layer = m_Network->AddConvolution2dLayer(desc,
736 filterTensorAndData.first,
737 layerName.c_str());
738 }
739
740 BOOST_ASSERT(layer != nullptr);
741
742    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
743    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
744
745 // register the input connection slots for the layer, connections are made after all layers have been created
746 // only the tensors for the inputs are relevant, exclude the const tensors
747 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
748    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
749
750    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
751    // register the output connection slots for the layer, connections are made after all layers have been created
752 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
753 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
754}
755
756void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
757{
758 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
759
760 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
761 const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
762
763 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
764
765 DepthwiseConvolution2dDescriptor desc;
766 desc.m_BiasEnabled = false;
767 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
768 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
769    desc.m_DataLayout = armnn::DataLayout::NHWC;
770    // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
771 CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);
772
773 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
774 CHECK_VALID_SIZE(inputs.size(), 2, 3);
775 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
776 CHECK_VALID_SIZE(outputs.size(), 1);
777
778 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
779 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
780
781    // Assuming input is NHWC
782    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
783    unsigned int inputWidth = inputTensorInfo.GetShape()[2];
784
785 // TensorflowLite weights come in the format [1, H, W, I * M]
786    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
787 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
788
789    // Reshape weights as [ H, W, I, M ]
790 filterTensorInfo.SetShape({ filterHeight,
791 filterWidth,
792 inputTensorInfo.GetShape()[3],
793 filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });
794
795 // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
796 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
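    // Illustrative worked example (editorial annotation, not part of the parser): with an
    // NHWC input of 16 channels and 3x3 depthwise weights, TensorflowLite supplies the
    // weights as [1, 3, 3, 16] (depth multiplier M = 1, as enforced above). They are
    // reshaped to [H, W, I, M] = [3, 3, 16, 1], and the { 2, 3, 1, 0 } permutation then
    // yields the ArmNN layout [M, I, H, W] = [1, 16, 3, 3].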
797
798    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
799 CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
800
801    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
802    armnn::IConnectableLayer* layer;
803 auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
804
805 if (inputs.size() == 3)
806 {
807 desc.m_BiasEnabled = true;
808 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
809        auto biasTensorAndData = CreateConstTensor(inputs[2],
810 biasTensorInfo,
811 armnn::Optional<armnn::PermutationVector&>());
812        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
813 filterTensorAndData.first,
814 biasTensorAndData.first,
815 layerName.c_str());
816 }
817 else
818 {
819 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
820 filterTensorAndData.first,
821 layerName.c_str());
822 }
823 BOOST_ASSERT(layer != nullptr);
824
825    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
826    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
827
828 // register the input connection slots for the layer, connections are made after all layers have been created
829 // only the tensors for the inputs are relevant, exclude the const tensors
830 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
831    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
832
833    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
834    // register the output connection slots for the layer, connections are made after all layers have been created
835 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
836 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
837}
838
839void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
840{
841 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
842}
843
844void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
845{
846 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
847
848 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
849 CHECK_VALID_SIZE(inputs.size(), 3);
850
851 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
852 CHECK_VALID_SIZE(outputs.size(), 1);
853
854 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
855 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
856
857 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
858 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
859
860 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
861 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
862
863 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
864 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
865
866 size_t step = 2;
867 std::vector<std::pair<unsigned int, unsigned int>> crops;
868 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
869 {
870 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
871 }
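    // Illustrative note (editorial annotation, not part of the parser): the crops tensor has
    // shape [spatialDims, 2], so for a 4D NHWC input it might hold { {0, 0}, {2, 0} }, i.e.
    // no cropping of the height and cropping two leading elements from the width after the
    // batch-to-space rearrangement.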
872
873 armnn::BatchToSpaceNdDescriptor desc;
874 desc.m_BlockShape = blockShape;
875 desc.m_Crops = crops;
876 desc.m_DataLayout = armnn::DataLayout::NHWC;
877
878 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
879
880 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
881 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
882
883 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
884
885 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
886 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
887
888 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
889 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
890}
891
892void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
893{
894 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
895}
896
897void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
898{
899 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
900
901 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
902 CHECK_VALID_SIZE(inputs.size(), 2);
903
904 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
905 CHECK_VALID_SIZE(outputs.size(), 1);
906
907 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
908 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
909
910 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
911 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
912
913 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
914 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
915
916 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
917 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
918 {
919 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
920 }
921 else
922 {
923 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
924 }
925
926 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
927 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
928}
929
930void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
931{
932 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
933
934 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
935 CHECK_VALID_SIZE(inputs.size(), 2);
936
937 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
938 CHECK_VALID_SIZE(outputs.size(), 1);
939
940 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
941 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
942
943 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
944 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
945
946 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
947 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
948
949 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
950 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
951 {
952 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
953 }
954 else
955 {
956 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
957 }
958
959 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
960 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
961}
962
963void TfLiteParser::ParsePool(size_t subgraphIndex,
964 size_t operatorIndex,
965 PoolingAlgorithm algorithm)
966{
967 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
968
969 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
970 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
971
972 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
973
974 std::string layerName;
975
976 switch (algorithm)
977 {
978 case PoolingAlgorithm::Average:
979 layerName =
980 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
981 break;
982 case PoolingAlgorithm::Max:
983 layerName =
984 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
985 break;
986 default:
987 BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
988 }
989
990 Pooling2dDescriptor desc;
991
992 desc.m_PoolType = algorithm;
993 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
994 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
995 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
996 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
997 desc.m_PaddingMethod = PaddingMethod::Exclude;
998 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
999    desc.m_DataLayout = armnn::DataLayout::NHWC;
1000
1001 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1002 CHECK_VALID_SIZE(inputs.size(), 1);
1003 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1004
1005 // assuming input is NHWC
1006 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1007 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1008
1009 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1010 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
1011
1012 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1013 CHECK_VALID_SIZE(outputs.size(), 1);
1014
1015 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1016
1017 BOOST_ASSERT(layer != nullptr);
1018
1019    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1020    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1021
1022 // register the input connection slots for the layer, connections are made after all layers have been created
1023 // only the tensors for the inputs are relevant, exclude the const tensors
1024 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1025    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1026
1027    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1028    // register the output connection slots for the layer, connections are made after all layers have been created
1029 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1030 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1031}
1032
1033void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1034{
1035 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1036 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1037 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1038
1039 SoftmaxDescriptor desc;
1040 desc.m_Beta = options->beta;
1041
1042 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1043 CHECK_VALID_SIZE(inputs.size(), 1);
1044 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1045 CHECK_VALID_SIZE(outputs.size(), 1);
1046
1047 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1048 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1049
1050 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1051 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1052
1053 // register the input connection slots for the layer, connections are made after all layers have been created
1054 // only the tensors for the inputs are relevant, exclude the const tensors
1055 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1056 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1057
1058 // register the output connection slots for the layer, connections are made after all layers have been created
1059 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1060 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1061}
1062
1063void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1064{
1065 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1066
1067 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1068 CHECK_VALID_SIZE(inputs.size(), 3);
1069
1070 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1071 CHECK_VALID_SIZE(outputs.size(), 1);
1072
1073 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1074 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1075
1076 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1077 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1078
1079 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1080 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1081
1082 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1083 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1084
1085 size_t step = 2;
1086 std::vector<std::pair<unsigned int, unsigned int>> padList;
1087 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1088 {
1089 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1090 }
1091
1092 armnn::SpaceToBatchNdDescriptor desc;
1093 desc.m_BlockShape = blockShape;
1094 desc.m_PadList = padList;
1095 desc.m_DataLayout = armnn::DataLayout::NHWC;
1096
1097 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1098
1099 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1100 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1101
1102 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1103
1104 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1105 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1106
1107 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1108 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1109}
1110
1111armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1112 const armnn::TensorInfo & inputTensorInfo)
1113{
1114 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1115 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1116 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1117
1118 if (inputTensorInfo.GetNumDimensions() > 4)
1119 {
1120 std::stringstream ss;
1121 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1122 << " shape:" << inputTensorInfo.GetShape() << " "
1123 << CHECK_LOCATION().AsString();
1124 throw ParseException(ss.str());
1125 }
1126
1127 if (squeezeDims.empty())
1128 {
1129 squeezeDims.assign(dimensionSequence,
1130 dimensionSequence+inputTensorInfo.GetNumDimensions());
1131 }
1132
1133 std::vector<uint32_t> outputDims;
1134 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1135 {
1136 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1137 auto currentDimension = inputTensorInfo.GetShape()[i];
1138 if (skipSqueeze || currentDimension != 1)
1139 {
1140 outputDims.push_back(currentDimension);
1141 }
1142 }
1143
1144 if (outputDims.size() > 4)
1145 {
1146 std::stringstream ss;
1147        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
1148           << " for input shape:" << inputTensorInfo.GetShape() << " "
1149 << CHECK_LOCATION().AsString();
1150 throw ParseException(ss.str());
1151 }
1152
1153 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1154 outputDims.data());
1155
1156 // we need to preserve the tensor type and the quantization data as well
1157 TensorInfo outTensorInfo = inputTensorInfo;
1158 outTensorInfo.SetShape(outShape);
1159
1160 return outTensorInfo;
1161}
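// Illustrative worked example (editorial annotation, not part of the parser): an input of
// shape [1, 4, 1, 2] with squeeze_dims = {0, 2} produces the output shape [4, 2]; with an
// empty squeeze_dims every dimension of size 1 is removed, which gives [4, 2] as well.
// Data type and quantization parameters are carried over unchanged.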
1162
1163void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1164{
1165 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1166
1167 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1168 CHECK_VALID_SIZE(inputs.size(), 1);
1169
1170 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1171 CHECK_VALID_SIZE(outputs.size(), 1);
1172
1173 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1174 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1175
1176 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1177 armnn::TensorInfo outputTensorInfo =
1178 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1179 inputTensorInfo);
1180
1181 ReshapeDescriptor reshapeDesc;
1182 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1183
1184 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1185 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1186 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1187
1188 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1189 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1190
1191 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1192 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1193}
1194
1195void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1196{
1197 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1198
1199 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1200 CHECK_VALID_SIZE(inputs.size(), 4);
1201
1202 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1203 CHECK_VALID_SIZE(outputs.size(), 1);
1204
1205 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1206 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1207
1208 StridedSliceDescriptor desc;
1209 desc.m_BeginMask = options->begin_mask;
1210 desc.m_EllipsisMask = options->ellipsis_mask;
1211 desc.m_EndMask = options->end_mask;
1212 desc.m_NewAxisMask = options->new_axis_mask;
1213 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1214 desc.m_DataLayout = armnn::DataLayout::NHWC;
1215
1216 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1217 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1218
1219 std::vector<int> begin(beginTensorInfo.GetNumElements());
1220 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1221
1222 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1223 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1224
1225 std::vector<int> end(endTensorInfo.GetNumElements());
1226 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1227
1228 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1229 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1230
1231 std::vector<int> stride(strideTensorInfo.GetNumElements());
1232 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1233
1234 desc.m_Begin = begin;
1235 desc.m_End = end;
1236 desc.m_Stride = stride;
1237
1238 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1239 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1240
1241 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1242 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1243
1244 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1245 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1246
1247 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1248 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1249}
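// Illustrative note (editorial annotation, not part of the parser, following TfLite's
// strided-slice semantics): the masks are per-dimension bit fields. For example, with
// positive strides, begin_mask = 0b0101 means begin[0] and begin[2] are ignored and those
// dimensions start at the lowest possible index, while shrink_axis_mask = 0b0010 collapses
// dimension 1 out of the output shape.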
1250
1251void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1252{
1253 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1254
1255 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1256 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1257
1258 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1259 CHECK_VALID_SIZE(inputs.size(), 2);
1260
1261 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1262 CHECK_VALID_SIZE(outputs.size(), 1);
1263
1264 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1265 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1266
1267 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1268 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1269
1270 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1271 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1272
1273 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1274 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1275 {
1276 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1277 }
1278 else
1279 {
1280 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1281 }
1282
1283 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1284
1285 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1286 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1287}
1288
1289void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1290{
1291 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1292
1293 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1294 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1295
1296 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1297 CHECK_VALID_SIZE(inputs.size(), 2);
1298
1299 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1300 CHECK_VALID_SIZE(outputs.size(), 1);
1301
1302    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1303 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1304
1305    auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1306 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1307
1308 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1309 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1310
1311 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1312    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1313 {
1314 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1315 }
1316 else
1317 {
1318 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1319 }
1320
1321 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1322
1323 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1324 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1325}
1326
1327void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1328{
1329 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1330
1331 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1332 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1333
1334 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1335 CHECK_VALID_SIZE(inputs.size(), 2);
1336
1337 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1338 CHECK_VALID_SIZE(outputs.size(), 1);
1339
1340    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1341 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1342
1343    auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1344 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1345
1346 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1347 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1348
1349 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001350 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1351 {
1352 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1353 }
1354 else
1355 {
1356 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1357 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001358
1359 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1360
1361 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1362 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1363}
1364
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001365void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1366{
1367 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1368
1369 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1370
1371 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1372 CHECK_VALID_SIZE(outputs.size(), 1);
1373
1374 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1375 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1376
1377 armnn::MeanDescriptor desc;
1378 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1379 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1380 desc.m_Axis = axis;
1381
1382 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1383 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1384
1385 desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1388
1389 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1390 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1391
1392 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1393
1394 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1395 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1396
1397 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1398 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1399}
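// Worked example for the MEAN mapping above (illustrative only): for an input of shape
// [1, 224, 224, 3] with an axis tensor containing {1, 2}, desc.m_Axis becomes {1, 2}. If the
// output shape is [1, 1, 1, 3] the ranks match and m_KeepDims is true; if the output shape is
// [1, 3] the ranks differ and m_KeepDims is false.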
1400
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001401void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1402{
1403 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1404
1405 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1406
1407 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1408 CHECK_VALID_SIZE(outputs.size(), 1);
1409
1410 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1411 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1412
1413 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1414 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1415
1416 size_t step = 2;
1417 armnn::PadDescriptor desc;
1418 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1419 {
1420 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1421 }
1422
1423 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1424 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1425
1426 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1427 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1428
1429 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1430 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1431
1432 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1433 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1434}
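// Worked example for the PAD mapping above (illustrative only): a paddings tensor of shape [4, 2]
// holding {0,0, 1,1, 2,2, 0,0} is read pairwise, so desc.m_PadList becomes
// {(0,0), (1,1), (2,2), (0,0)}, i.e. (padding before, padding after) for each input dimension.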
1435
Finn Williamsc42c3842019-01-22 14:18:11 +00001436
Sadik Armagan58f39192018-09-17 14:14:39 +01001437void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1438{
Finn Williamsc42c3842019-01-22 14:18:11 +00001439 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001440}
1441
1442void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1443{
Finn Williamsc42c3842019-01-22 14:18:11 +00001444 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::BoundedReLu);
1445}
Sadik Armagan58f39192018-09-17 14:14:39 +01001446
Finn Williamsc42c3842019-01-22 14:18:11 +00001447void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1448{
1449 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Sigmoid);
1450}
1451
1452
1453void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1454{
1455 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001456 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1457 boost::ignore_unused(operatorPtr);
1458
1459 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1460 CHECK_VALID_SIZE(inputs.size(), 1);
1461
1462 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1463 CHECK_VALID_SIZE(outputs.size(), 1);
1464
Finn Williamsc42c3842019-01-22 14:18:11 +00001465 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001466 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001467 activationDesc.m_Function = activationType;
1468
1469 switch (activationType)
1470 {
1471 case ActivationFunction::ReLu:
1472 {
1473 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1474 break;
1475 }
1476 case ActivationFunction::BoundedReLu:
1477 {
1478 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1479 activationDesc.m_A = 6.0f;
1480 activationDesc.m_B = 0.0f;
1481 break;
1482 }
1483 case ActivationFunction::Sigmoid:
1484 {
1485 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1486 break;
1487 }
1488 default:
1489 {
1490 throw ParseException(
1491 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1492 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1493 }
1494 }
1495
1496 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001497
1498 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1499 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1500
1501 // register the input connection slots for the layer, connections are made after all layers have been created
1502 // only the tensors for the inputs are relevant, exclude the const tensors
1503 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1504 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1505
1506 // register the output connection slots for the layer, connections are made after all layers have been created
1507 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1508 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1509}
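// Naming example for the shared activation handler above (illustrative only): RELU6 on operator 5
// of subgraph 0 produces a layer called "Activation:RELU6:0:5", with the descriptor set to
// BoundedReLu and the bounds m_A = 6.0f, m_B = 0.0f; LOGISTIC maps to Sigmoid with no parameters.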
Sadikb94967b2018-09-19 15:30:00 +01001510armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1511 const std::vector<int32_t> & targetDimsIn)
1512{
1513 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1514 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1515
1516 if (stretchDim != targetDimsIn.end())
1517 {
1518 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1519 {
1520 throw ParseException(
1521 boost::str(
1522 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1523 }
1524
1525 auto targetNumElements =
1526 boost::numeric_cast<unsigned int>(
1527 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1528
1529 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1530 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1531 }
1532
1533 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1534
1535 TensorInfo reshapeInfo = inputTensorInfo;
1536 reshapeInfo.SetShape(outputShape);
1537
1538 return reshapeInfo;
1539}
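// Worked example for the -1 ("stretch") handling above (illustrative only): for an input with
// 24 elements and a target shape of {-1, 6}, the accumulated product (seeded with -1) is 6, so the
// stretch dimension is filled with 24 / 6 = 4 and the resulting shape is [4, 6].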
1540
1541void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1542{
1543 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1544
1545 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001546
1547 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1548 CHECK_VALID_SIZE(outputs.size(), 1);
1549
1550 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1551 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1552
1553 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001554 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1555 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001556 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1557
kevmay0171972a82018-12-17 14:28:03 +00001558 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001559 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1560 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001561 {
1562 std::stringstream ss;
1563 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001564 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001565 << " does not equal output shape "
1566 << actualOutputTensorInfo.GetShape()
1567 << ": "
1568 << CHECK_LOCATION().AsString();
1569 throw ParseException(ss.str());
1570 }
1571
Sadikb94967b2018-09-19 15:30:00 +01001572 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001573 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001574
1575 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1576 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001577 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001578
1579 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1580 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1581
1582 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1583 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1584}
1585
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001586void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1587{
1588 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1589
1590 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1591 CHECK_VALID_SIZE(inputs.size(), 2);
1592
1593 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1594 CHECK_VALID_SIZE(outputs.size(), 1);
1595
1596 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1597
1598 // Data for the parsed tensor args (size) must be stored locally.
1599 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1600
1601 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1602 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1603
1604 ResizeBilinearDescriptor desc;
1605 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
1606 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1607 desc.m_DataLayout = armnn::DataLayout::NHWC;
1608
1609 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
1610 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
1611
1612 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1613 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1614
1615 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1616 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1617
1618 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1619 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1620}
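// Note for the RESIZE_BILINEAR mapping above (illustrative only): the second input holds the new
// spatial size as {height, width}, so a size tensor containing {256, 128} yields
// m_TargetHeight = 256 and m_TargetWidth = 128, always interpreted with the NHWC layout set on the
// descriptor.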
1621
Sadik Armagan479045b2018-10-01 11:51:37 +01001622void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1623{
1624 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1625
1626 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1627 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1628
1629 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1630
1631 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1632 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1633 CHECK_VALID_SIZE(outputs.size(), 1);
1634
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001635 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1636 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001637
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001638 const unsigned int concatDimInput = static_cast<unsigned int>(
1639 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01001640
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001641 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1642 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001643
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001644 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001645
1646 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1647 {
1648 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1649
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001650 // This sets up the concatDescriptor view origin
1651 armnnUtils::ProcessConcatInputTensorInfo(
1652 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001653 }
1654
1655 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
1656 IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());
1657
1658 BOOST_ASSERT(layer != nullptr);
1659
1660 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1661 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001662
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001663 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001664
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001665 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001666
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001667 // add fused activation layer
1668 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001669
Sadik Armagan479045b2018-10-01 11:51:37 +01001670 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1671 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1672}
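// Axis-normalisation example for the concatenation above (illustrative only): a negative TfLite
// axis is wrapped into the positive range, so with rank-4 inputs options->axis == -1 gives
// concatDimInput = (4 + -1) % 4 = 3, i.e. the channels dimension for NHWC tensors, which is then
// set as the concatenation axis on the OriginsDescriptor.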
1673
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001674void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1675{
1676 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1677
1678 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1679 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1680
1681 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1682
1683 FullyConnectedDescriptor desc;
1684 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001685 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001686
1687 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1688 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1689 CHECK_VALID_SIZE(outputs.size(), 1);
1690
1691 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1692
1693 // Fully Connected Layer accepts two-dimensional weights input
1694 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1695 if (weightsDimension != 2)
1696 {
1697 throw ParseException(
1698 boost::str(
1699 boost::format(
1700 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1701 "Node %2%")
1702 % weightsDimension
1703 % CHECK_LOCATION().AsString()));
1704 }
1705
Matteo Martincigh747ef822018-12-18 09:26:39 +00001706 auto filterTensorAndData = CreateConstTensor(inputs[1],
1707 filterTensorInfo,
1708 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001709 armnn::IConnectableLayer* layer;
1710 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1711
1712 if (inputs.size() == 3)
1713 {
1714 desc.m_BiasEnabled = true;
1715 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001716 auto biasTensorAndData = CreateConstTensor(inputs[2],
1717 biasTensorInfo,
1718 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001719 layer = m_Network->AddFullyConnectedLayer(desc,
1720 filterTensorAndData.first,
1721 biasTensorAndData.first,
1722 layerName.c_str());
1723 }
1724 else
1725 {
1726 layer = m_Network->AddFullyConnectedLayer(desc,
1727 filterTensorAndData.first,
1728 layerName.c_str());
1729 }
1730 BOOST_ASSERT(layer != nullptr);
1731
1732 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1733 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1734
1735 // register the input connection slot for the layer
1736 // only the tensors for the inputs are relevant, exclude the const tensors
1737 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1738 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1739
1740 // Add the fused activation layer here; the data layout is irrelevant for the activation
1741 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1742 options->fused_activation_function);
1743 // register the output connection slots for the layer, connections are made after all layers have been created
1744 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1745 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1746}
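// Note for the FULLY_CONNECTED mapping above (illustrative only): TfLite stores the weights as a
// two-dimensional [num_units, input_size] tensor, which is why only rank-2 weights are accepted
// and m_TransposeWeightMatrix is set to true; a third input, when present, is treated as the bias.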
1747
keidav011b3e2ea2019-02-21 10:07:37 +00001748void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1749{
1750 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1751
1752 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1753
1754 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1755 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1756 CHECK_VALID_SIZE(outputs.size(), 4);
1757
1758 // Obtain custom options from flexbuffers
1759 auto custom_options = operatorPtr->custom_options;
1760 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1761
1762 // Obtain descriptor information from tf lite
1763 DetectionPostProcessDescriptor desc;
1764 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1765 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1766 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1767 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1768 desc.m_NumClasses = m["num_classes"].AsUInt32();
1769 desc.m_ScaleH = m["h_scale"].AsFloat();
1770 desc.m_ScaleW = m["w_scale"].AsFloat();
1771 desc.m_ScaleX = m["x_scale"].AsFloat();
1772 desc.m_ScaleY = m["y_scale"].AsFloat();
1773
keidav0107d58c72019-02-26 11:57:39 +00001774 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00001775 {
keidav0107d58c72019-02-26 11:57:39 +00001776 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00001777 }
1778 if (!(m["detections_per_class"].IsNull()))
1779 {
1780 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1781 }
1782
1783 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1784 {
1785 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1786 "must be positive and less than or equal to 1.");
1787 }
1788
1789 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
1790 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
1791 armnn::Optional<armnn::PermutationVector&>());
1792
1793 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
1794 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
1795 layerName.c_str());
1796
1797 BOOST_ASSERT(layer != nullptr);
1798
1799 // Register outputs
1800 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
1801 {
1802 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i]);
1803 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
1804 }
1805
1806 // Register the input connection slots for the layer, connections are made after all layers have been created
1807 // only the tensors for the inputs are relevant, exclude the const tensors
1808 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1809 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1810
1811 // Register the output connection slots for the layer, connections are made after all layers have been created
1812 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1813 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
1814 outputTensorIndexes[1],
1815 outputTensorIndexes[2],
1816 outputTensorIndexes[3]});
1817}
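// Note for the custom DetectionPostProcess op above (illustrative only): the four registered
// outputs correspond, in order, to the detection boxes, detection classes, detection scores and
// the number of valid detections, matching the TfLite custom operator's output layout. The
// descriptor values (max_detections, NMS thresholds, scales, etc.) come from the operator's
// flexbuffer-encoded custom options rather than from builtin options.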
1818
Sadik Armagan58f39192018-09-17 14:14:39 +01001819armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
1820 unsigned int outputSlot,
1821 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01001822{
1823 ActivationDescriptor activationDesc;
1824 std::string layerName = prevLayer->GetName();
1825
1826 switch(activationType)
1827 {
1828 case tflite::ActivationFunctionType_NONE:
1829 {
1830 // this is a no-op: return previous layer
1831 return prevLayer;
1832 }
1833 case tflite::ActivationFunctionType_RELU:
1834 {
1835 activationDesc.m_Function = ActivationFunction::ReLu;
1836 layerName += ":RELU";
1837 break;
1838 }
1839 case tflite::ActivationFunctionType_RELU6:
1840 {
1841 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1842 activationDesc.m_A = 6.0f;
1843 activationDesc.m_B = 0.0f;
1844 layerName += ":RELU6";
1845 break;
1846 }
1847 case tflite::ActivationFunctionType_TANH:
1848 {
1849 activationDesc.m_Function = ActivationFunction::TanH;
1850 activationDesc.m_A = 1.0f;
1851 activationDesc.m_B = 1.0f;
1852 layerName += ":TANH";
1853 break;
1854 }
1855
1856 // These are listed here only as a reminder of what else we could support
1857 case tflite::ActivationFunctionType_RELU_N1_TO_1:
1858 case tflite::ActivationFunctionType_SIGN_BIT:
1859 default:
1860 {
1861 throw ParseException(
1862 boost::str(
1863 boost::format("TfLite parser doesn't support fused activation: "
1864 "%1%/%2% %3% ") %
1865 activationType %
1866 tflite::EnumNameActivationFunctionType(activationType) %
1867 CHECK_LOCATION().AsString()));
1868
1869 }
1870 }
1871
1872 IConnectableLayer* activationLayer =
1873 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1874
1875 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
1876 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
1877 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
1878 return activationLayer;
1879}
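// Usage note for the helper above (illustrative only): callers pass the layer that produced the
// tensor and the output slot carrying it, e.g. AddFusedActivationLayer(addLayer, 0,
// tflite::ActivationFunctionType_RELU6) wires a new "<addLayerName>:RELU6" activation layer after
// slot 0 and returns it, so output slots must then be registered on the returned layer; with
// ActivationFunctionType_NONE the original layer is returned unchanged.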
1880
1881TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
1882{
1883 if (fileName == nullptr)
1884 {
1885 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
1886 CHECK_LOCATION().AsString()));
1887 }
1888 boost::system::error_code errorCode;
1889 boost::filesystem::path pathToFile(fileName);
1890 if (!boost::filesystem::exists(pathToFile, errorCode))
1891 {
1892 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
1893 fileName %
1894 errorCode %
1895 CHECK_LOCATION().AsString()));
1896 }
1897 std::ifstream file(fileName, std::ios::binary);
1898 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
1899 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
1900 fileContent.size());
1901}
1902
1903TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
1904{
1905 if (binaryContent == nullptr)
1906 {
1907 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
1908 CHECK_LOCATION().AsString()));
1909 }
1910 flatbuffers::Verifier verifier(binaryContent, len);
1911 if (verifier.VerifyBuffer<tflite::Model>() == false)
1912 {
1913 throw ParseException(
1914 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
1915 "flatbuffers format. size:%1% %2%") %
1916 len %
1917 CHECK_LOCATION().AsString()));
1918 }
1919 return tflite::UnPackModel(binaryContent);
1920}
1921
1922TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
1923 size_t subgraphIndex,
1924 size_t operatorIndex)
1925{
1926 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1927
1928 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1929 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1930
1931 size_t inputCount = operatorPtr->inputs.size();
1932 TensorRawPtrVector result(inputCount);
1933 for (size_t i=0; i<inputCount; ++i)
1934 {
1935 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
1936 result[i] = subGraphPtr->tensors[inputId].get();
1937 }
1938 return result;
1939}
1940
1941TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
1942 size_t subgraphIndex,
1943 size_t operatorIndex)
1944{
1945 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1946
1947 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1948 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1949
1950 size_t outputCount = operatorPtr->outputs.size();
1951 TensorRawPtrVector result(outputCount);
1952 for (size_t i=0; i<outputCount; ++i)
1953 {
1954 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
1955 CHECK_TENSOR(model, subgraphIndex, outputId);
1956 result[i] = subGraphPtr->tensors[outputId].get();
1957 }
1958 return result;
1959}
1960
1961TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
1962 size_t subgraphIndex)
1963{
1964 CHECK_SUBGRAPH(model, subgraphIndex);
1965 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1966
1967 size_t inputCount = subGraphPtr->inputs.size();
1968 TensorIdRawPtrVector result(inputCount);
1969 for (size_t i=0; i<inputCount; ++i)
1970 {
1971 uint32_t inputId = CHECKED_NON_NEGATIVE(subGraphPtr->inputs[i]);
1972 CHECK_TENSOR(model, subgraphIndex, inputId);
1973 result[i] = std::make_pair(inputId, subGraphPtr->tensors[inputId].get());
1974 }
1975 return result;
1976}
1977
1978TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
1979 size_t subgraphIndex)
1980{
1981 CHECK_SUBGRAPH(model, subgraphIndex);
1982 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1983
1984 size_t outputCount = subGraphPtr->outputs.size();
1985 TensorIdRawPtrVector result(outputCount);
1986 for (size_t i=0; i<outputCount; ++i)
1987 {
1988 uint32_t outputId = CHECKED_NON_NEGATIVE(subGraphPtr->outputs[i]);
1989 result[i] = std::make_pair(outputId, subGraphPtr->tensors[outputId].get());
1990 }
1991 return result;
1992}
1993
1994std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
1995 size_t subgraphIndex,
1996 size_t operatorIndex)
1997{
1998 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1999 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
2000 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
2001 return operatorPtr->inputs;
2002}
2003
2004std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2005 size_t subgraphIndex,
2006 size_t operatorIndex)
2007{
2008 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2009 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
2010 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
2011 return operatorPtr->outputs;
2012}
2013
2014void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2015 size_t operatorIndex,
2016 IConnectableLayer* layer,
2017 const std::vector<unsigned int>& tensorIndexes)
2018{
2019 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2020 BOOST_ASSERT(layer != nullptr);
2021 if (tensorIndexes.size() != layer->GetNumInputSlots())
2022 {
2023 throw ParseException(
2024 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2025 " for subgraph:%3% operator index:%4% %5%") %
2026 tensorIndexes.size() %
2027 layer->GetNumInputSlots() %
2028 subgraphIndex %
2029 operatorIndex %
2030 CHECK_LOCATION().AsString()));
2031 }
2032
2033 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2034 {
2035 unsigned int tensorIndex = tensorIndexes[slotIndex];
2036 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2037 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2038 }
2039}
2040
2041void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2042 size_t operatorIndex,
2043 IConnectableLayer* layer,
2044 const std::vector<unsigned int>& tensorIndexes)
2045{
2046 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2047 BOOST_ASSERT(layer != nullptr);
2048 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2049 {
2050 throw ParseException(
2051 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2052 " for subgraph:%3% operator index:%4% %5%") %
2053 tensorIndexes.size() %
2054 layer->GetNumOutputSlots() %
2055 subgraphIndex %
2056 operatorIndex %
2057 CHECK_LOCATION().AsString()));
2058 }
2059
2060 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2061 {
2062 unsigned int tensorIndex = tensorIndexes[slotIndex];
2063 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2064 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2065 }
2066}
2067
2068void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2069{
2070 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2071
2072 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2073 for (auto const & tensorIdAndPtr : inputs)
2074 {
2075 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2076 IConnectableLayer* layer =
2077 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2078
2079 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2080 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2081
2082 RegisterOutputSlots(subgraphIndex,
2083 VIRTUAL_OPERATOR_ID,
2084 layer,
2085 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2086 }
2087}
2088
2089void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2090{
2091 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2092
2093 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2094 for (auto const & tensorIdAndPtr : outputs)
2095 {
2096 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2097 IConnectableLayer* layer =
2098 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2099
2100 RegisterInputSlots(subgraphIndex,
2101 VIRTUAL_OPERATOR_ID,
2102 layer,
2103 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2104 }
2105}
2106
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002107void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2108{
2109 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2110
2111 const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
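// A tensor that has registered consumers but no registered producer at this point is taken to be
// a constant, so a ConstantLayer is created from its buffer and registered as its producer below.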
2114 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2115 {
2116 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2117 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2118 {
2119 TensorRawPtr tensorPtr = subGraphPtr->tensors[tensorIndex].get();
2120 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2121 auto tensorAndData = CreateConstTensor(tensorPtr,
2122 tensorInfo,
2123 armnn::Optional<armnn::PermutationVector&>());
2124
2125 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2126 IConnectableLayer *layer =
2127 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2128
2129 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2130 RegisterOutputSlots(subgraphIndex,
2131 VIRTUAL_OPERATOR_ID,
2132 layer,
2133 { tensorIndex });
2134
2135 }
2136 }
2138}
2139
telsoa01c577f2c2018-08-31 09:22:23 +01002140// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2141TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2142{
2143 CHECK_BUFFER(model, bufferIndex);
2144 return model->buffers[bufferIndex].get();
2145}
2146
Matteo Martincigh747ef822018-12-18 09:26:39 +00002147template<typename T>
2148std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2149TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2150 TfLiteParser::TensorRawPtr tensorPtr,
2151 armnn::TensorInfo& tensorInfo,
2152 armnn::Optional<armnn::PermutationVector&> permutationVector)
2153{
2154 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2155 tensorPtr,
2156 tensorInfo,
2157 permutationVector);
2158 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2159 return std::make_pair(constData.first, std::move(storage));
2160}
2161
telsoa01c577f2c2018-08-31 09:22:23 +01002162std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2163TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002164 armnn::TensorInfo& tensorInfo,
2165 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002166{
2167 CHECK_TENSOR_PTR(tensorPtr);
2168 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2169 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2170
2171 switch (tensorInfo.GetDataType())
2172 {
2173 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002174 return CreateConstTensorAndStoreData<float>(bufferPtr,
2175 tensorPtr,
2176 tensorInfo,
2177 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002178 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002179 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2180 tensorPtr,
2181 tensorInfo,
2182 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002183 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002184 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2185 tensorPtr,
2186 tensorInfo,
2187 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002188 default:
2189 {
2190 std::stringstream errString;
2191 errString << "Unexpected datatype when creating const tensor: "
2192 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2193 << " shape:" << tensorInfo.GetShape()
2194 << CHECK_LOCATION().AsString();
2195 throw ParseException(errString.str());
2196 }
2197 }
2198}
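// Note for the helper above (illustrative only): only Float32, QuantisedAsymm8 and Signed32
// constants are supported, and the returned SupportedDataStorage owns the copied (and possibly
// permuted) buffer, so the pair should be kept alive for as long as the ConstTensor's data is
// referenced, e.g. until the layer consuming it has been added to the network.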
2199
2200BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2201 const std::string& name) const
2202{
2203 CHECK_SUBGRAPH(m_Model, subgraphId);
2204 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2205 for (auto const & input : inputs)
2206 {
2207 if (input.second->name == name)
2208 {
2209 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2210 return std::make_pair(bindingId, ToTensorInfo(input.second));
2211 }
2212 }
2213
2214 std::stringstream bindings;
2215 for (auto const & input : inputs)
2216 {
2217 bindings << "'" << input.second->name << "' ";
2218 }
2219
2220 throw ParseException(
2221 boost::str(
2222 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2223 "Possible inputs are: [%3%] %4%") %
2224 subgraphId %
2225 name %
2226 bindings.str() %
2227 CHECK_LOCATION().AsString()));
2228}
2229
2230BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2231 const std::string& name) const
2232{
2233 CHECK_SUBGRAPH(m_Model, subgraphId);
2234 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2235 for (auto const & output : outputs)
2236 {
2237 if (output.second->name == name)
2238 {
2239 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
2240 return std::make_pair(bindingId, ToTensorInfo(output.second));
2241 }
2242 }
2243
2244 std::stringstream bindings;
2245 for (auto const & output : outputs)
2246 {
2247 bindings << "'" << output.second->name << "' ";
2248 }
2249
2250 throw ParseException(
2251 boost::str(
2252 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2253 "Possible outputs are: [%3%] %4%") %
2254 subgraphId %
2255 name %
2256 bindings.str() %
2257 CHECK_LOCATION().AsString()));
2258}
2259
2260size_t TfLiteParser::GetSubgraphCount() const
2261{
2262 return m_Model->subgraphs.size();
2263}
2264
2265std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2266{
2267 CHECK_SUBGRAPH(m_Model, subgraphId);
2268 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2269 std::vector<std::string> result;
2270 result.reserve(inputs.size());
2271 for (auto const & input : inputs)
2272 {
2273 result.push_back(input.second->name);
2274 }
2275 return result;
2276}
2277
2278std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2279{
2280 CHECK_SUBGRAPH(m_Model, subgraphId);
2281 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2282 std::vector<std::string> result;
2283 result.reserve(outputs.size());
2284 for (auto const & output : outputs)
2285 {
2286 result.push_back(output.second->name);
2287 }
2288 return result;
2289}
2290
2291ITfLiteParser* ITfLiteParser::CreateRaw()
2292{
2293 return new TfLiteParser();
2294}
2295
2296ITfLiteParserPtr ITfLiteParser::Create()
2297{
2298 return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
2299}
2300
2301void ITfLiteParser::Destroy(ITfLiteParser* parser)
2302{
2303 delete parser;
2304}
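// A minimal usage sketch of the public factory functions above (illustrative only, not part of
// the parser). It assumes the usual CreateNetworkFromBinaryFile entry point declared in
// ITfLiteParser.hpp, a model file "model.tflite", and tensors named "input" and "output" in
// subgraph 0; the file path and tensor names are hypothetical.
//
//     using namespace armnnTfLiteParser;
//     ITfLiteParserPtr parser = ITfLiteParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//     BindingPointInfo inputBinding  = parser->GetNetworkInputBindingInfo(0, "input");
//     BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(0, "output");
//     // inputBinding.first is the layer binding id, inputBinding.second the armnn::TensorInfo.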
2305
2306TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
2307: m_FloatData(std::move(data))
2308, m_Uint8Data(nullptr)
2309, m_Int32Data(nullptr)
2310{
2311}
2312
2313TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2314: m_FloatData(nullptr)
2315, m_Uint8Data(std::move(data))
2316, m_Int32Data(nullptr)
2317{
2318}
2319
2320TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2321: m_FloatData(nullptr)
2322, m_Uint8Data(nullptr)
2323, m_Int32Data(std::move(data))
2324{
2325}
2326
2327} // armnnTfLiteParser