//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TfLiteParser.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>

// armnnUtils:
#include <ParserHelper.hpp>
#include <Permute.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>

#include <fstream>
#include <algorithm>
#include <limits>
#include <numeric>
#include <flatbuffers/flexbuffers.h>

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{
namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParser::ModelPtr & model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid operator index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // The model is not checked here because CHECK_MODEL is assumed to have
    // already run and validated it; an assert is sufficient.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // Likewise the subgraph index should already have been validated by
    // CHECK_MODEL, so only assert on it here.
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // the tensor index is the only one to check here
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                tensorIndex %
                location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
                    const CheckLocation & location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with a null tensor pointer. "
                              "at %2%") %
                location.m_Function %
                location.FileLine()));
    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParser::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "buffer:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid buffer index. "
                              "buffer index:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("The buffer #%1% is null. %2%") %
                bufferIndex %
                location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                bufferId %
                location.AsString()));
    }
    else if (tensorInfo.GetNumElements() > bufferPtr->data.size() ||
             tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)


std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto & i : in)
    {
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}

void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t temp = (outputSize - 1) * stride + filterSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}
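// Illustrative worked example (values are hypothetical, not from the original source):
// for SAME padding with inputSize = 224, filterSize = 3 and stride = 2, CalcPadding
// computes outputSize = (224 + 2 - 1) / 2 = 112 and temp = (112 - 1) * 2 + 3 = 225,
// so a single padding element is needed in total: paddingFront = (225 - 224) / 2 = 0
// and paddingBack = 1.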

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                    tensorPtr->type %
                    tflite::EnumNameTensorType(tensorPtr->type) %
                    tensorPtr->name %
                    location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32,
            //       but this is what ArmNN supports at the moment.
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(shapes.size()),
                             shapes.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions);
}
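// Illustrative example (hypothetical values, not from the original source): a
// TensorType_UINT8 tensor of shape [1, 224, 224, 3] with quantization scale 0.0078125
// and zero_point 128 maps to a TensorInfo of type QuantisedAsymm8 with the same shape,
// quantization scale 0.0078125 and quantization offset 128.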

template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
                     boost::str(
                         boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // Generate the binding id by shifting the tensor id left by 8 bits and
    // adding the subgraph id, which allows up to 256 subgraphs.
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}
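// Illustrative example (hypothetical values, not from the original source): for
// subgraphIndex = 1 and tensorIndex = 3 the generated binding id is (3 << 8) + 1 = 769.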

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

} // <anonymous>

TfLiteParser::TfLiteParser()
: m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
}

void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}

void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subGraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subGraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subGraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
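// Illustrative example (hypothetical shapes, not from the original source): for an
// element-wise layer whose inputs have shapes [1, 2, 2, 3] and [3], the function above
// inserts a Reshape that turns the lower-rank input into [1, 1, 1, 3], so both inputs
// have the same number of dimensions before broadcasting.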

INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubGraphPtr const & subgraph : m_Model->subgraphs)
    {
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                            builtinCode %
                            tflite::BuiltinOperator_MAX %
                            subgraphIndex %
                            operatorIndex %
                            CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
            boost::format("Another layer has already registered itself as the producer of "
                          "subgraph:%1% tensor:%2% %3%") %
            subgraphIndex %
            tensorIndex %
            CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    throw ParseException(
        boost::str(
            boost::format("Operator not supported. "
                          "subgraph:%1% operator:%2% "
                          "opcode_index:%3% opcode:%4% / %5% %6%") %
            subgraphIndex %
            operatorIndex %
            opcodeIndex %
            opcode %
            tflite::EnumNameBuiltinOperator(opcode) %
            CHECK_LOCATION().AsString()));
}

void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 biasTensorAndData.first,
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
    CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
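    // Illustrative example (hypothetical shapes, not from the original source): a TfLite
    // filter of shape [1, 3, 3, 3] with 3 input channels (depth multiplier 1) is reshaped
    // above to [3, 3, 3, 1] (H, W, I, M) and then permuted by CreateConstTensor into
    // [1, 3, 3, 3] (M, I, H, W).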

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasTensorAndData.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}

void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }
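    // Illustrative example (hypothetical values, not from the original source): a 2x2
    // crops tensor [[0, 0], [2, 2]] is read as the flat vector {0, 0, 2, 2} above and
    // paired up into crops = {(0, 0), (2, 2)}.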

    armnn::BatchToSpaceNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_Crops = crops;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}

void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();

    SoftmaxDescriptor desc;
    desc.m_Beta = options->beta;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
                                                     const armnn::TensorInfo & inputTensorInfo)
{
    CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
    std::vector<uint32_t> squeezeDims = squeezeDimsIn;
    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        std::stringstream ss;
        ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
           << " shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    if (squeezeDims.empty())
    {
        squeezeDims.assign(dimensionSequence,
                           dimensionSequence+inputTensorInfo.GetNumDimensions());
    }

    std::vector<uint32_t> outputDims;
    for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
    {
        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
        auto currentDimension = inputTensorInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    if (outputDims.size() > 4)
    {
        std::stringstream ss;
        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
           << " for input shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    // we need to preserve the tensor type and the quantization data as well
    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);

    return outTensorInfo;
}
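// Illustrative example (hypothetical shapes, not from the original source): squeezing an
// input of shape [1, 2, 2, 1] with squeeze_dims = {0} gives [2, 2, 1]; with an empty
// squeeze_dims list every dimension of size 1 is removed, giving [2, 2].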

void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo outputTensorInfo =
        TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
                                           inputTensorInfo);

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();

    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSubOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsAddOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

Bruno Goncalvesf803f782018-12-18 13:40:30 -02001331void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1332{
1333 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1334
1335 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1336 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1337
1338 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1339 CHECK_VALID_SIZE(inputs.size(), 2);
1340
1341 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1342 CHECK_VALID_SIZE(outputs.size(), 1);
1343
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001344 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1345 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1346
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001347 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1348 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1349
1350 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1351 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1352
1353 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001354 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1355 {
1356 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1357 }
1358 else
1359 {
1360 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1361 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001362
1363 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1364
1365 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1366 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1367}
1368
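// ParseMean reads the reduction axes from the second (constant) input tensor and infers
// m_KeepDims by comparing the input and output ranks.
// Illustrative (hypothetical) example: input [1,4,4,32] with axis buffer {1, 2} and output [1,32]
// gives m_Axis = {1, 2} and m_KeepDims = false; an output of [1,1,1,32] would give m_KeepDims = true.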
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001369void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1370{
1371 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1372
1373 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1374
1375 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1376 CHECK_VALID_SIZE(outputs.size(), 1);
1377
1378 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1379 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1380
1381 armnn::MeanDescriptor desc;
1382 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1383 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1384 desc.m_Axis = axis;
1385
1386 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1387 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1388
1389    desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1392
1393 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1394 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1395
1396 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1397
1398 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1399 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1400
1401 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1402 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1403}
1404
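// ParsePad reads the paddings from the second (constant) input tensor, laid out as one
// {before, after} pair per input dimension.
// Illustrative (hypothetical) example: a rank-2 input with pad buffer {0, 0, 1, 1} yields
// m_PadList = { {0,0}, {1,1} }, i.e. one element of padding on each side of the last dimension.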
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001405void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1406{
1407 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1408
1409 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1410
1411 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1412 CHECK_VALID_SIZE(outputs.size(), 1);
1413
1414 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1415 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1416
1417 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1418 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1419
1420 size_t step = 2;
1421 armnn::PadDescriptor desc;
1422 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1423 {
1424 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1425 }
1426
1427 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1428 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1429
1430 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1431 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1432
1433 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1434 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1435
1436 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1437 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1438}
1439
Finn Williamsc42c3842019-01-22 14:18:11 +00001440
Sadik Armagan58f39192018-09-17 14:14:39 +01001441void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1442{
Finn Williamsc42c3842019-01-22 14:18:11 +00001443    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001444}
1445
1446void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1447{
Finn Williamsc42c3842019-01-22 14:18:11 +00001448    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::BoundedReLu);
1449}
Sadik Armagan58f39192018-09-17 14:14:39 +01001450
Finn Williamsc42c3842019-01-22 14:18:11 +00001451void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1452{
1453    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Sigmoid);
1454}
1455
1456
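// ParseActivation maps the standalone TfLite activation operators onto a single ArmNN
// ActivationLayer: RELU becomes ReLu, RELU6 becomes BoundedReLu with m_A = 6.0f (upper bound)
// and m_B = 0.0f (lower bound), and LOGISTIC becomes Sigmoid.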
1457void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1458{
1459 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001460 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1461 boost::ignore_unused(operatorPtr);
1462
1463 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1464 CHECK_VALID_SIZE(inputs.size(), 1);
1465
1466 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1467 CHECK_VALID_SIZE(outputs.size(), 1);
1468
Finn Williamsc42c3842019-01-22 14:18:11 +00001469 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001470 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001471 activationDesc.m_Function = activationType;
1472
1473 switch (activationType)
1474 {
1475 case ActivationFunction::ReLu:
1476 {
1477 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1478 break;
1479 }
1480 case ActivationFunction::BoundedReLu:
1481 {
1482 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1483 activationDesc.m_A = 6.0f;
1484 activationDesc.m_B = 0.0f;
1485 break;
1486 }
1487 case ActivationFunction::Sigmoid:
1488 {
1489 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1490 break;
1491 }
1492 default:
1493 {
1494 throw ParseException(
1495 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1496 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1497 }
1498 }
1499
1500 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001501
1502 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1503 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1504
1505 // register the input connection slots for the layer, connections are made after all layers have been created
1506 // only the tensors for the inputs are relevant, exclude the const tensors
1507 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1508 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1509
1510 // register the output connection slots for the layer, connections are made after all layers have been created
1511 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1512 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1513}
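
// OutputShapeOfReshape resolves a single -1 ("stretch") dimension in the requested shape from the
// element count of the input.
// Illustrative (hypothetical) example: an input of shape [1,2,3,4] (24 elements) with
// targetDimsIn = {-1, 6} produces an output shape of [4, 6].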
Sadikb94967b2018-09-19 15:30:00 +01001514armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1515 const std::vector<int32_t> & targetDimsIn)
1516{
1517 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1518 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1519
1520 if (stretchDim != targetDimsIn.end())
1521 {
1522 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1523 {
1524 throw ParseException(
1525 boost::str(
1526 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1527 }
1528
1529 auto targetNumElements =
1530 boost::numeric_cast<unsigned int>(
1531 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1532
1533 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1534 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1535 }
1536
1537 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1538
1539 TensorInfo reshapeInfo = inputTensorInfo;
1540 reshapeInfo.SetShape(outputShape);
1541
1542 return reshapeInfo;
1543}
1544
1545void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1546{
1547 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1548
1549 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001550
1551 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1552 CHECK_VALID_SIZE(outputs.size(), 1);
1553
1554 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1555 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1556
1557 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001558 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1559 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001560 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1561
kevmay0171972a82018-12-17 14:28:03 +00001562 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001563 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1564 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001565 {
1566 std::stringstream ss;
1567 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001568 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001569 << " does not equal output shape "
1570 << actualOutputTensorInfo.GetShape()
1571 << ": "
1572 << CHECK_LOCATION().AsString();
1573 throw ParseException(ss.str());
1574 }
1575
Sadikb94967b2018-09-19 15:30:00 +01001576 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001577 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001578
1579 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1580 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001581 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001582
1583 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1584 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1585
1586 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1587 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1588}
1589
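// ParseResizeBilinear reads the target size from the second (constant) input tensor, which holds
// {height, width}; the layer is always created with NHWC data layout.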
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001590void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1591{
1592 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1593
1594 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1595 CHECK_VALID_SIZE(inputs.size(), 2);
1596
1597 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1598 CHECK_VALID_SIZE(outputs.size(), 1);
1599
1600 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1601
1602 // Data for the parsed tensor args (size) must be stored locally.
1603 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1604
1605 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1606 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1607
1608 ResizeBilinearDescriptor desc;
1609 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
1610 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1611 desc.m_DataLayout = armnn::DataLayout::NHWC;
1612
1613 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
1614 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
1615
1616 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1617 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1618
1619 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1620 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1621
1622 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1623 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1624}
1625
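// ParseConcatenation normalises the (possibly negative) TfLite axis into a positive concatenation
// dimension before building the OriginsDescriptor.
// Illustrative (hypothetical) example: rank-4 inputs with options->axis = -1 give
// concatDimInput = (4 + (-1)) % 4 = 3, i.e. concatenation over the innermost (channels) dimension.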
Sadik Armagan479045b2018-10-01 11:51:37 +01001626void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1627{
1628 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1629
1630 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1631 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1632
1633 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1634
1635 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1636 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1637 CHECK_VALID_SIZE(outputs.size(), 1);
1638
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001639 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1640 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001641
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001642 const unsigned int concatDimInput = static_cast<unsigned int>(
1643 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01001644
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001645 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1646 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001647
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001648 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001649
1650 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1651 {
1652 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1653
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001654        // This sets up the concatDescriptor view origin for this input
1655 armnnUtils::ProcessConcatInputTensorInfo(
1656 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001657 }
1658
1659 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
1660 IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());
1661
1662 BOOST_ASSERT(layer != nullptr);
1663
1664 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1665 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001666
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001667 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001668
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001669 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001670
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001671 // add fused activation layer
1672 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001673
Sadik Armagan479045b2018-10-01 11:51:37 +01001674 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1675 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1676}
1677
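// ParseFullyConnected requires two-dimensional weights (input 1) and treats an optional third
// input as the bias; the weights are interpreted as transposed via m_TransposeWeightMatrix.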
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001678void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1679{
1680 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1681
1682 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1683 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1684
1685 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1686
1687 FullyConnectedDescriptor desc;
1688 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001689 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001690
1691 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1692 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1693 CHECK_VALID_SIZE(outputs.size(), 1);
1694
1695 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1696
1697    // The Fully Connected layer accepts only two-dimensional weights input
1698 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1699 if (weightsDimension != 2)
1700 {
1701 throw ParseException(
1702 boost::str(
1703 boost::format(
1704 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1705 "Node %2%")
1706 % weightsDimension
1707 % CHECK_LOCATION().AsString()));
1708 }
1709
Matteo Martincigh747ef822018-12-18 09:26:39 +00001710 auto filterTensorAndData = CreateConstTensor(inputs[1],
1711 filterTensorInfo,
1712 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001713 armnn::IConnectableLayer* layer;
1714 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1715
1716 if (inputs.size() == 3)
1717 {
1718 desc.m_BiasEnabled = true;
1719 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001720 auto biasTensorAndData = CreateConstTensor(inputs[2],
1721 biasTensorInfo,
1722 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001723 layer = m_Network->AddFullyConnectedLayer(desc,
1724 filterTensorAndData.first,
1725 biasTensorAndData.first,
1726 layerName.c_str());
1727 }
1728 else
1729 {
1730 layer = m_Network->AddFullyConnectedLayer(desc,
1731 filterTensorAndData.first,
1732 layerName.c_str());
1733 }
1734 BOOST_ASSERT(layer != nullptr);
1735
1736 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1737 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1738
1739 // register the input connection slot for the layer
1740 // only the tensors for the inputs are relevant, exclude the const tensors
1741 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1742 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1743
1744    // Add the fused activation layer; it does not depend on the data layout, so no extra handling is needed
1745 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1746 options->fused_activation_function);
1747 // register the output connection slots for the layer, connections are made after all layers have been created
1748 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1749 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1750}
1751
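// ParseDetectionPostProcess is a TfLite custom operator, so its parameters arrive as a flexbuffers
// map in custom_options rather than as builtin options. The model does not carry the four output
// shapes; they are derived from max_detections * max_classes_per_detection and recorded in
// m_OverridenOutputShapes so that GetNetworkOutputBindingInfo() can report them later.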
keidav011b3e2ea2019-02-21 10:07:37 +00001752void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1753{
1754 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1755
1756 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1757
1758 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1759 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1760 CHECK_VALID_SIZE(outputs.size(), 4);
1761
1762 // Obtain custom options from flexbuffers
1763 auto custom_options = operatorPtr->custom_options;
1764 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1765
1766 // Obtain descriptor information from tf lite
1767 DetectionPostProcessDescriptor desc;
1768 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1769 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1770 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1771 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1772 desc.m_NumClasses = m["num_classes"].AsUInt32();
1773 desc.m_ScaleH = m["h_scale"].AsFloat();
1774 desc.m_ScaleW = m["w_scale"].AsFloat();
1775 desc.m_ScaleX = m["x_scale"].AsFloat();
1776 desc.m_ScaleY = m["y_scale"].AsFloat();
1777
keidav0107d58c72019-02-26 11:57:39 +00001778 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00001779 {
keidav0107d58c72019-02-26 11:57:39 +00001780 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00001781 }
1782 if (!(m["detections_per_class"].IsNull()))
1783 {
1784 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1785 }
1786
1787 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1788 {
1789 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1790 "must be positive and less than or equal to 1.");
1791 }
1792
1793 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
1794 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
1795 armnn::Optional<armnn::PermutationVector&>());
1796
1797 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
1798 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
1799 layerName.c_str());
1800
1801 BOOST_ASSERT(layer != nullptr);
1802
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00001803 // The model does not specify the output shapes.
1804    // The output shapes are calculated from max_detections and max_classes_per_detection.
1805 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
1806 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
1807 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
1808 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
1809 m_OverridenOutputShapes.push_back({ 1 });
1810
keidav011b3e2ea2019-02-21 10:07:37 +00001811 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
1812 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00001813 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00001814 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
1815 }
1816
1817 // Register the input connection slots for the layer, connections are made after all layers have been created
1818 // only the tensors for the inputs are relevant, exclude the const tensors
1819 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1820 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1821
1822 // Register the output connection slots for the layer, connections are made after all layers have been created
1823 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1824 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
1825 outputTensorIndexes[1],
1826 outputTensorIndexes[2],
1827 outputTensorIndexes[3]});
1828}
1829
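// AddFusedActivationLayer appends an ActivationLayer to the given output slot of the previous
// layer when an operator carries a fused activation (RELU, RELU6 or TANH); for
// ActivationFunctionType_NONE the previous layer is returned unchanged.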
Sadik Armagan58f39192018-09-17 14:14:39 +01001830armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
1831 unsigned int outputSlot,
1832 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01001833{
1834 ActivationDescriptor activationDesc;
1835 std::string layerName = prevLayer->GetName();
1836
1837 switch(activationType)
1838 {
1839 case tflite::ActivationFunctionType_NONE:
1840 {
1841 // this is a no-op: return previous layer
1842 return prevLayer;
1843 }
1844 case tflite::ActivationFunctionType_RELU:
1845 {
1846 activationDesc.m_Function = ActivationFunction::ReLu;
1847 layerName += ":RELU";
1848 break;
1849 }
1850 case tflite::ActivationFunctionType_RELU6:
1851 {
1852 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1853 activationDesc.m_A = 6.0f;
1854 activationDesc.m_B = 0.0f;
1855 layerName += ":RELU6";
1856 break;
1857 }
1858 case tflite::ActivationFunctionType_TANH:
1859 {
1860 activationDesc.m_Function = ActivationFunction::TanH;
1861 activationDesc.m_A = 1.0f;
1862 activationDesc.m_B = 1.0f;
1863 layerName += ":TANH";
1864 break;
1865 }
1866
1867        // These cases are listed here only as a reminder of other fused activations that could be supported
1868 case tflite::ActivationFunctionType_RELU_N1_TO_1:
1869 case tflite::ActivationFunctionType_SIGN_BIT:
1870 default:
1871 {
1872 throw ParseException(
1873 boost::str(
1874                    boost::format("TfLite parser doesn't support fused activation: "
1875 "%1%/%2% %3% ") %
1876 activationType %
1877 tflite::EnumNameActivationFunctionType(activationType) %
1878 CHECK_LOCATION().AsString()));
1879
1880 }
1881 }
1882
1883 IConnectableLayer* activationLayer =
1884 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1885
1886 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
1887 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
1888 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
1889 return activationLayer;
1890}
1891
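// example usage (a sketch only; "model.tflite" is a placeholder path):
//     TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile("model.tflite");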
1892TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
1893{
1894 if (fileName == nullptr)
1895 {
1896 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
1897 CHECK_LOCATION().AsString()));
1898 }
1899 boost::system::error_code errorCode;
1900 boost::filesystem::path pathToFile(fileName);
1901 if (!boost::filesystem::exists(pathToFile, errorCode))
1902 {
1903 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
1904 fileName %
1905 errorCode %
1906 CHECK_LOCATION().AsString()));
1907 }
1908 std::ifstream file(fileName, std::ios::binary);
1909 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
1910 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
1911 fileContent.size());
1912}
1913
1914TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
1915{
1916 if (binaryContent == nullptr)
1917 {
1918 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
1919 CHECK_LOCATION().AsString()));
1920 }
1921 flatbuffers::Verifier verifier(binaryContent, len);
1922 if (verifier.VerifyBuffer<tflite::Model>() == false)
1923 {
1924 throw ParseException(
1925            boost::str(boost::format("Buffer doesn't conform to the expected TensorFlow Lite "
1926 "flatbuffers format. size:%1% %2%") %
1927 len %
1928 CHECK_LOCATION().AsString()));
1929 }
1930 return tflite::UnPackModel(binaryContent);
1931}
1932
1933TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
1934 size_t subgraphIndex,
1935 size_t operatorIndex)
1936{
1937 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1938
1939 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1940 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1941
1942 size_t inputCount = operatorPtr->inputs.size();
1943 TensorRawPtrVector result(inputCount);
1944 for (size_t i=0; i<inputCount; ++i)
1945 {
1946 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
1947 result[i] = subGraphPtr->tensors[inputId].get();
1948 }
1949 return result;
1950}
1951
1952TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
1953 size_t subgraphIndex,
1954 size_t operatorIndex)
1955{
1956 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1957
1958 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1959 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1960
1961 size_t outputCount = operatorPtr->outputs.size();
1962 TensorRawPtrVector result(outputCount);
1963 for (size_t i=0; i<outputCount; ++i)
1964 {
1965 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
1966 CHECK_TENSOR(model, subgraphIndex, outputId);
1967 result[i] = subGraphPtr->tensors[outputId].get();
1968 }
1969 return result;
1970}
1971
1972TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
1973 size_t subgraphIndex)
1974{
1975 CHECK_SUBGRAPH(model, subgraphIndex);
1976 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1977
1978 size_t inputCount = subGraphPtr->inputs.size();
1979 TensorIdRawPtrVector result(inputCount);
1980 for (size_t i=0; i<inputCount; ++i)
1981 {
1982 uint32_t inputId = CHECKED_NON_NEGATIVE(subGraphPtr->inputs[i]);
1983 CHECK_TENSOR(model, subgraphIndex, inputId);
1984 result[i] = std::make_pair(inputId, subGraphPtr->tensors[inputId].get());
1985 }
1986 return result;
1987}
1988
1989TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
1990 size_t subgraphIndex)
1991{
1992 CHECK_SUBGRAPH(model, subgraphIndex);
1993 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1994
1995 size_t outputCount = subGraphPtr->outputs.size();
1996 TensorIdRawPtrVector result(outputCount);
1997 for (size_t i=0; i<outputCount; ++i)
1998 {
1999 uint32_t outputId = CHECKED_NON_NEGATIVE(subGraphPtr->outputs[i]);
2000 result[i] = std::make_pair(outputId, subGraphPtr->tensors[outputId].get());
2001 }
2002 return result;
2003}
2004
2005std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2006 size_t subgraphIndex,
2007 size_t operatorIndex)
2008{
2009 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2010 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
2011 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
2012 return operatorPtr->inputs;
2013}
2014
2015std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2016 size_t subgraphIndex,
2017 size_t operatorIndex)
2018{
2019 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2020 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
2021 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
2022 return operatorPtr->outputs;
2023}
2024
2025void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2026 size_t operatorIndex,
2027 IConnectableLayer* layer,
2028 const std::vector<unsigned int>& tensorIndexes)
2029{
2030 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2031 BOOST_ASSERT(layer != nullptr);
2032 if (tensorIndexes.size() != layer->GetNumInputSlots())
2033 {
2034 throw ParseException(
2035 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2036 " for subgraph:%3% operator index:%4% %5%") %
2037 tensorIndexes.size() %
2038 layer->GetNumInputSlots() %
2039 subgraphIndex %
2040 operatorIndex %
2041 CHECK_LOCATION().AsString()));
2042 }
2043
2044 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2045 {
2046 unsigned int tensorIndex = tensorIndexes[slotIndex];
2047 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2048 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2049 }
2050}
2051
2052void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2053 size_t operatorIndex,
2054 IConnectableLayer* layer,
2055 const std::vector<unsigned int>& tensorIndexes)
2056{
2057 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2058 BOOST_ASSERT(layer != nullptr);
2059 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2060 {
2061 throw ParseException(
2062 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2063 " for subgraph:%3% operator index:%4% %5%") %
2064 tensorIndexes.size() %
2065 layer->GetNumOutputSlots() %
2066 subgraphIndex %
2067 operatorIndex %
2068 CHECK_LOCATION().AsString()));
2069 }
2070
2071 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2072 {
2073 unsigned int tensorIndex = tensorIndexes[slotIndex];
2074 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2075 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2076 }
2077}
2078
2079void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2080{
2081 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2082
2083 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2084 for (auto const & tensorIdAndPtr : inputs)
2085 {
2086 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2087 IConnectableLayer* layer =
2088 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2089
2090 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2091 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2092
2093 RegisterOutputSlots(subgraphIndex,
2094 VIRTUAL_OPERATOR_ID,
2095 layer,
2096 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2097 }
2098}
2099
2100void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2101{
2102 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2103
2104 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2105 for (auto const & tensorIdAndPtr : outputs)
2106 {
2107 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2108 IConnectableLayer* layer =
2109 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2110
2111 RegisterInputSlots(subgraphIndex,
2112 VIRTUAL_OPERATOR_ID,
2113 layer,
2114 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2115 }
2116}
2117
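// SetupConstantLayers creates a ConstantLayer for every tensor in the subgraph that is consumed by
// at least one input slot but produced by no operator, i.e. constant data embedded in the model.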
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002118void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2119{
2120 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2121
2122 const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
2123    for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2124    {
2125        if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2126            m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2127        {
2128            TensorRawPtr tensorPtr = subGraphPtr->tensors[tensorIndex].get();
2129            armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2130            auto tensorAndData = CreateConstTensor(tensorPtr,
2131                                                   tensorInfo,
2132                                                   armnn::Optional<armnn::PermutationVector&>());
2133
2134            std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2135            IConnectableLayer *layer =
2136                m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2137
2138            layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2139            RegisterOutputSlots(subgraphIndex,
2140                                VIRTUAL_OPERATOR_ID,
2141                                layer,
2142                                { tensorIndex });
2143        }
2144    }
2149}
2150
telsoa01c577f2c2018-08-31 09:22:23 +01002151// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2152TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2153{
2154 CHECK_BUFFER(model, bufferIndex);
2155 return model->buffers[bufferIndex].get();
2156}
2157
Matteo Martincigh747ef822018-12-18 09:26:39 +00002158template<typename T>
2159std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2160TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2161 TfLiteParser::TensorRawPtr tensorPtr,
2162 armnn::TensorInfo& tensorInfo,
2163 armnn::Optional<armnn::PermutationVector&> permutationVector)
2164{
2165 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2166 tensorPtr,
2167 tensorInfo,
2168 permutationVector);
2169 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2170 return std::make_pair(constData.first, std::move(storage));
2171}
2172
telsoa01c577f2c2018-08-31 09:22:23 +01002173std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2174TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002175 armnn::TensorInfo& tensorInfo,
2176 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002177{
2178 CHECK_TENSOR_PTR(tensorPtr);
2179 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2180 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2181
2182 switch (tensorInfo.GetDataType())
2183 {
2184 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002185 return CreateConstTensorAndStoreData<float>(bufferPtr,
2186 tensorPtr,
2187 tensorInfo,
2188 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002189 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002190 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2191 tensorPtr,
2192 tensorInfo,
2193 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002194 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002195 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2196 tensorPtr,
2197 tensorInfo,
2198 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002199 default:
2200 {
2201 std::stringstream errString;
2202 errString << "Unexpected datatype when creating const tensor: "
2203 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2204 << " shape:" << tensorInfo.GetShape()
2205 << CHECK_LOCATION().AsString();
2206 throw ParseException(errString.str());
2207 }
2208 }
2209}
2210
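// example usage (a sketch only; "parser" and the tensor name "input" are placeholders):
//     BindingPointInfo info = parser->GetNetworkInputBindingInfo(0, "input");
//     armnn::LayerBindingId bindingId = info.first;
//     armnn::TensorInfo inputTensorInfo = info.second;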
2211BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2212 const std::string& name) const
2213{
2214 CHECK_SUBGRAPH(m_Model, subgraphId);
2215 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2216 for (auto const & input : inputs)
2217 {
2218 if (input.second->name == name)
2219 {
2220 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2221 return std::make_pair(bindingId, ToTensorInfo(input.second));
2222 }
2223 }
2224
2225 std::stringstream bindings;
2226 for (auto const & input : inputs)
2227 {
2228 bindings << "'" << input.second->name << "' ";
2229 }
2230
2231 throw ParseException(
2232 boost::str(
2233 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2234 "Possible inputs are: [%3%] %4%") %
2235 subgraphId %
2236 name %
2237 bindings.str() %
2238 CHECK_LOCATION().AsString()));
2239}
2240
2241BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2242 const std::string& name) const
2243{
2244 CHECK_SUBGRAPH(m_Model, subgraphId);
2245 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002246 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002247 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002248 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002249 if (output.second->name == name)
2250 {
2251 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002252 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2253 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2254 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002255 }
2256 }
2257
2258 std::stringstream bindings;
2259 for (auto const & output : outputs)
2260 {
2261 bindings << "'" << output.second->name << "' ";
2262 }
2263
2264 throw ParseException(
2265 boost::str(
2266 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2267 "Possible outputs are: [%3%] %4%") %
2268 subgraphId %
2269 name %
2270 bindings.str() %
2271 CHECK_LOCATION().AsString()));
2272}
2273
2274size_t TfLiteParser::GetSubgraphCount() const
2275{
2276 return m_Model->subgraphs.size();
2277}
2278
2279std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2280{
2281 CHECK_SUBGRAPH(m_Model, subgraphId);
2282 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2283 std::vector<std::string> result;
2284 result.reserve(inputs.size());
2285 for (auto const & input : inputs)
2286 {
2287 result.push_back(input.second->name);
2288 }
2289 return result;
2290}
2291
2292std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2293{
2294 CHECK_SUBGRAPH(m_Model, subgraphId);
2295 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2296 std::vector<std::string> result;
2297 result.reserve(outputs.size());
2298 for (auto const & output : outputs)
2299 {
2300 result.push_back(output.second->name);
2301 }
2302 return result;
2303}
2304
2305ITfLiteParser* ITfLiteParser::CreateRaw()
2306{
2307 return new TfLiteParser();
2308}
2309
2310ITfLiteParserPtr ITfLiteParser::Create()
2311{
2312 return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
2313}
2314
2315void ITfLiteParser::Destroy(ITfLiteParser* parser)
2316{
2317 delete parser;
2318}
2319
2320TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
2321: m_FloatData(std::move(data))
2322, m_Uint8Data(nullptr)
2323, m_Int32Data(nullptr)
2324{
2325}
2326
2327TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2328: m_FloatData(nullptr)
2329, m_Uint8Data(std::move(data))
2330, m_Int32Data(nullptr)
2331{
2332}
2333
2334TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2335: m_FloatData(nullptr)
2336, m_Uint8Data(nullptr)
2337, m_Int32Data(std::move(data))
2338{
2339}
2340
2341} // armnnTfLiteParser