telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5#include "TfLiteParser.hpp"
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Exceptions.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <boost/filesystem.hpp>
11
12// armnnUtils:
Sadik Armagan479045b2018-10-01 11:51:37 +010013#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010014#include <Permute.hpp>
15#include <VerificationHelpers.hpp>
16
17// The generated code based on the Tf Lite schema:
18#include <schema_generated.h>
19
20#include <boost/core/ignore_unused.hpp>
21#include <boost/assert.hpp>
22#include <boost/format.hpp>
23#include <boost/log/trivial.hpp>
24
25#include <fstream>
26#include <algorithm>
27#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010028#include <numeric>
keidav011b3e2ea2019-02-21 10:07:37 +000029#include <flatbuffers/flexbuffers.h>
telsoa01c577f2c2018-08-31 09:22:23 +010030
31using namespace armnn;
32using armnn::CheckLocation;
33namespace armnnTfLiteParser
34{
35namespace
36{
jimfly01c25411c2018-11-14 17:47:22 +000037
telsoa01c577f2c2018-08-31 09:22:23 +010038const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
39
40void CheckSubgraph(const TfLiteParser::ModelPtr & model,
41 size_t subgraphIndex,
42 const CheckLocation & location)
43{
44 if (model.get() == nullptr)
45 {
46 throw ParseException(
47 boost::str(
48 boost::format("%1% was called with invalid (null) model. "
49 "Possible reason is that the model is not yet loaded and Unpack(ed). "
50 "subgraph:%2% at %3%") %
51 location.m_Function %
52 subgraphIndex %
53 location.FileLine()));
54 }
55 else if (subgraphIndex >= model->subgraphs.size())
56 {
57 throw ParseException(
58 boost::str(
59 boost::format("%1% was called with an invalid subgraph index. "
60 "subgraph:%2% at %3%") %
61 location.m_Function %
62 subgraphIndex %
63 location.FileLine()));
64 }
65}
66
67#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
68 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
69
70void CheckModel(const TfLiteParser::ModelPtr & model,
71 size_t subgraphIndex,
72 size_t operatorIndex,
73 const CheckLocation & location)
74{
75 if (model.get() == nullptr)
76 {
77 throw ParseException(
78 boost::str(
79 boost::format("%1% was called with invalid (null) model. "
80 "Possible reason is that the model is not yet loaded and Unpack(ed). "
81 "subgraph:%2% operator:%3% at %4%") %
82 location.m_Function %
83 subgraphIndex %
84 operatorIndex %
85 location.FileLine()));
86 }
87 else if (subgraphIndex >= model->subgraphs.size())
88 {
89 throw ParseException(
90 boost::str(
91 boost::format("%1% was called with an invalid subgraph index. "
92 "subgraph:%2% operator:%3% at %4%") %
93 location.m_Function %
94 subgraphIndex %
95 operatorIndex %
96 location.FileLine()));
97 }
98 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
99 operatorIndex != VIRTUAL_OPERATOR_ID)
100 {
101 throw ParseException(
102 boost::str(
103 boost::format("%1% was called with an invalid operator index. "
104 "subgraph:%2% operator:%3% at %4%") %
105 location.m_Function %
106 subgraphIndex %
107 operatorIndex %
108 location.FileLine()));
109 }
110}
111
112#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
113 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
114
115void CheckTensor(const TfLiteParser::ModelPtr & model,
116 size_t subgraphIndex,
117 size_t tensorIndex,
118 const CheckLocation & location)
119{
    // The model is not checked here because CHECK_MODEL is expected to have
    // run already, so an assert is sufficient.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // Likewise, the subgraph index should already have been checked by CHECK_MODEL,
    // so only an assert is added here.
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
127
128 // the tensor index is the only one to check here
129 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
130 {
131 throw ParseException(
132 boost::str(
133 boost::format("%1% was called with an invalid tensor index. "
134 "subgraph:%2% tensor:%3% at %4%") %
135 location.m_Function %
136 subgraphIndex %
137 tensorIndex %
138 location.FileLine()));
139 }
140}
141
142#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
143 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
144
145void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
146 const CheckLocation & location)
147{
148 if (rawPtr == nullptr)
149 {
150 throw ParseException(
151 boost::str(
152 boost::format("%1% was called with a null tensor pointer. "
153 "at %2%") %
154 location.m_Function %
155 location.FileLine()));
156
157 }
158}
159
160#define CHECK_TENSOR_PTR(TENSOR_PTR) \
161 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
162
163void CheckBuffer(const TfLiteParser::ModelPtr & model,
164 size_t bufferIndex,
165 const CheckLocation & location)
166{
167 if (model.get() == nullptr)
168 {
169 throw ParseException(
170 boost::str(
171 boost::format("%1% was called with invalid (null) model. "
172 "Possible reason is that the model is not yet loaded and Unpack(ed). "
173 "buffer:%2% at %3%") %
174 location.m_Function %
175 bufferIndex %
176 location.FileLine()));
177 }
178 else if (bufferIndex >= model->buffers.size())
179 {
180 throw ParseException(
181 boost::str(
182 boost::format("%1% was called with an invalid buffer index. "
183 "buffer index:%2% at %3%") %
184 location.m_Function %
185 bufferIndex %
186 location.FileLine()));
187 }
188 else if (model->buffers[bufferIndex].get() == nullptr)
189 {
190 throw ParseException(
191 boost::str(
                boost::format("The buffer #%1% is null. %2%") %
193 bufferIndex %
194 location.AsString()));
195 }
196}
197
198#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
199 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
200
201void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
202 const armnn::TensorInfo & tensorInfo,
203 uint32_t bufferId,
204 const CheckLocation & location)
205{
206 if (bufferPtr == nullptr)
207 {
208 throw ParseException(
209 boost::str(
210 boost::format("BufferPtr is null for buffer:%1%. %2%") %
211 bufferId %
212 location.AsString()));
213 }
214 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
215 tensorInfo.GetNumBytes() > bufferPtr->data.size())
216 {
217 std::stringstream ss;
218 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
219 << "For tensor: " << tensorInfo.GetShape()
220 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
221 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
222 throw ParseException(ss.str());
223 }
224}
225
226#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
227 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
228
229bool IsActivationSupported(tflite::ActivationFunctionType activationType)
230{
231 switch(activationType)
232 {
233 case tflite::ActivationFunctionType_NONE:
234 case tflite::ActivationFunctionType_RELU:
235 case tflite::ActivationFunctionType_RELU6:
236 case tflite::ActivationFunctionType_TANH:
237 {
238 return true;
239 }
240 default:
241 {
242 return false;
243 }
244 }
245}
246
247#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
248 do { \
249 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
250 { \
251 throw ParseException( \
252 boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
254 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
255 OPTION->fused_activation_function % \
256 tflite::EnumNameActivationFunctionType(\
257 OPTION->fused_activation_function) % \
258 __func__ % \
259 SUBGRAPH_INDEX % \
260 OPERATOR_INDEX % \
261 CHECK_LOCATION().FileLine())); \
262 } \
263 } while(false)
264
265
266std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
267{
268 std::vector<unsigned int> result;
269 result.reserve(in.size());
270 for (auto & i : in)
271 {
272 result.push_back(CHECKED_NON_NEGATIVE(i));
273 }
274 return result;
275}
276
277void CalcPadding(uint32_t inputSize,
278 uint32_t filterSize,
279 uint32_t stride,
280 uint32_t& paddingFront,
281 uint32_t& paddingBack,
282 tflite::Padding padding)
283{
284 paddingFront = 0;
285 paddingBack = 0;
286 if (padding == tflite::Padding_SAME)
287 {
288 uint32_t outputSize = (inputSize + stride - 1) / stride;
289 uint32_t temp = (outputSize - 1) * stride + filterSize;
290 if (temp > inputSize)
291 {
292 paddingFront = (temp - inputSize) / 2;
293 paddingBack = (temp - inputSize) - paddingFront;
294 }
295 }
296}
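// Worked example (illustrative values, SAME padding): inputSize=5, filterSize=3, stride=2
// gives outputSize = (5 + 2 - 1) / 2 = 3 and temp = (3 - 1) * 2 + 3 = 7, so the total
// padding is 7 - 5 = 2, split as paddingFront = 1 and paddingBack = 1.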
297
298armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
299{
300 armnn::DataType type;
301 CHECK_TENSOR_PTR(tensorPtr);
302
303 switch (tensorPtr->type)
304 {
305 case tflite::TensorType_UINT8:
306 type = armnn::DataType::QuantisedAsymm8;
307 break;
308 case tflite::TensorType_FLOAT32:
309 type = armnn::DataType::Float32;
310 break;
311 case tflite::TensorType_INT32:
312 type = armnn::DataType::Signed32;
313 break;
314
315 default:
316 {
317 CheckLocation location = CHECK_LOCATION();
318 throw ParseException(
319 boost::str(
320 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
321 tensorPtr->type %
322 tflite::EnumNameTensorType(tensorPtr->type) %
323 tensorPtr->name %
324 location.AsString()));
325 }
326 }
327
328 float quantizationScale = 0.0f;
329 int32_t quantizationOffset = 0;
330
331 if (tensorPtr->quantization.get())
332 {
333 CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
334 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
335
336 if (tensorPtr->quantization->scale.size() == 1)
337 {
338 quantizationScale = tensorPtr->quantization->scale[0];
339 }
340 if (tensorPtr->quantization->zero_point.size() == 1)
341 {
            // NOTE: we lose precision here when converting from 64 bit to 32,
            // but this is what ArmNN supports at the moment
344 quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
345 }
346 }
347
348 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
349
350 // two statements (on purpose) for easier debugging:
351 armnn::TensorInfo result(static_cast<unsigned int>(tensorPtr->shape.size()),
352 dimensions.data(),
353 type,
354 quantizationScale,
355 quantizationOffset);
356 return result;
357}
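// Illustrative example: a TensorType_UINT8 tensor with quantization
// {scale:[0.05], zero_point:[128]} maps to a QuantisedAsymm8 TensorInfo with
// quantization scale 0.05 and offset 128 (real value = scale * (quantized - offset)).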
358
359template<typename T>
360std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
361CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
362 TfLiteParser::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000363 armnn::TensorInfo& tensorInfo,
364 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100365{
366 BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
367 BOOST_ASSERT_MSG(bufferPtr != nullptr,
368 boost::str(
369 boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
370
371 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000372
373 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
374 {
375 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000376 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
377 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000378 }
379 else
380 {
381 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
382 }
383
telsoa01c577f2c2018-08-31 09:22:23 +0100384 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
385}
386
telsoa01c577f2c2018-08-31 09:22:23 +0100387armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
388{
    // Generate the binding id by shifting the tensor id left by 8 bits
    // and adding the subgraph id, which allows up to 256 subgraphs.
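    // For example (illustrative values): subgraphIndex 0 and tensorIndex 5
    // yield (5 << 8) + 0 = 1280 as the binding id.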
391 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
392}
393
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000394bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
395{
396 const unsigned int actualSize = actual.GetNumDimensions();
397 if (actualSize != expected.size())
398 {
399 return false;
400 }
401
402 for (unsigned int i = 0u; i < actualSize; i++)
403 {
404 if (expected[i] < 0 ||
405 actual[i] != static_cast<unsigned int>(expected[i]))
406 {
407 return false;
408 }
409 }
410
411 return true;
412}
413
telsoa01c577f2c2018-08-31 09:22:23 +0100414} // <anonymous>
415
416TfLiteParser::TfLiteParser()
417: m_Network(nullptr, nullptr)
418, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
419{
420 // register supported operators
421 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
Sadik Armagan479045b2018-10-01 11:51:37 +0100422 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
telsoa01c577f2c2018-08-31 09:22:23 +0100423 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
424 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
keidav011b3e2ea2019-02-21 10:07:37 +0000425 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
Sadik Armagan8853c1f2018-10-22 09:04:18 +0100426 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
Finn Williamsc42c3842019-01-22 14:18:11 +0000427 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100428 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
Sadik Armagan58f39192018-09-17 14:14:39 +0100429 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
430 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
Sadikb94967b2018-09-19 15:30:00 +0100431 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -0200432 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
Sadik Armagan479045b2018-10-01 11:51:37 +0100433 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
434 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
Bruno Goncalvesbbeae262019-02-07 18:37:39 -0200435 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -0200436 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
Bruno Goncalvesf803f782018-12-18 13:40:30 -0200437 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
Bruno Goncalves2235cee2018-12-19 12:51:45 -0200438 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
Bruno Goncalves6c2355b2018-12-19 12:52:01 -0200439 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
telsoa01c577f2c2018-08-31 09:22:23 +0100440}
441
442void TfLiteParser::ResetParser()
443{
444 m_Network = armnn::INetworkPtr(nullptr, nullptr);
445 m_Model = nullptr;
446 m_SubgraphConnections.clear();
447}
448
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200449void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
450 size_t operatorIndex,
451 IConnectableLayer *layer)
452{
453 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
454 BOOST_ASSERT(layer != nullptr);
455
456 const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
457 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
458
459 BOOST_ASSERT(operatorPtr->inputs.size() > 1);
460
461 uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
462 TensorRawPtr tensorPtr = subGraphPtr->tensors[reshapedInputId].get();
463 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
464 TensorRawPtr tensorPtr1 = subGraphPtr->tensors[inputId].get();
465
466 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
467 armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);
468
469 if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
470 {
471 uint32_t id = reshapedInputId;
472 reshapedInputId = inputId;
473 inputId = id;
474
475 reshapedTensorInfo = ToTensorInfo(tensorPtr1);
476 inputTensorInfo = ToTensorInfo(tensorPtr);
477 }
478
479 uint32_t numDimensions = inputTensorInfo.GetNumDimensions();
480
481 std::vector<unsigned> reshapedDim;
482 for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
483 {
484 reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
485 }
486
487 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
488 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
489
490 reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
491
492 std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
493 armnn::ReshapeDescriptor desc;
494 desc.m_TargetShape = reshapedTensorInfo.GetShape();
495 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
496
497 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
498 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
499
500 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
501
502 armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
503 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
504}
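// Illustrative example: when broadcasting a [3] tensor against a [1,2,2,3] tensor,
// the lower-rank input is reshaped to [1,1,1,3] (leading 1s prepended) and the
// reshape layer's output is connected to input slot 0 of the elementwise layer,
// while the higher-rank input feeds slot 1.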
505
telsoa01c577f2c2018-08-31 09:22:23 +0100506INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
507{
508 ResetParser();
509 m_Model = LoadModelFromFile(graphFile);
510 return CreateNetworkFromModel();
511}
512
513INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
514{
515 ResetParser();
516 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
517 return CreateNetworkFromModel();
518}
519
520INetworkPtr TfLiteParser::CreateNetworkFromModel()
521{
522 m_Network = INetwork::Create();
523 BOOST_ASSERT(m_Model.get() != nullptr);
524
525 bool failedToCreate = false;
526 std::stringstream errors;
527
528 if (m_Model->subgraphs.size() != 1)
529 {
530 throw ParseException(
531 boost::str(
                boost::format("The TfLite parser currently supports only 1 subgraph, but the model has: %1% %2%") %
533 m_Model->subgraphs.size() %
534 CHECK_LOCATION().AsString()));
535 }
536
537 size_t subgraphIndex = 0;
538 for (SubGraphPtr const & subgraph : m_Model->subgraphs)
539 {
540 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
541
542 size_t operatorIndex = 0;
543 for (OperatorPtr const & op : subgraph->operators)
544 {
545 try
546 {
telsoa01c577f2c2018-08-31 09:22:23 +0100547 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
548 auto builtinCode = opCodePtr->builtin_code;
549
550 if (builtinCode > tflite::BuiltinOperator_MAX)
551 {
552 throw ParseException(
553 boost::str(
554 boost::format("Operator code %1% is out of range 0-%2%. "
555 "subgraph:%3% operator idx:%4%. %5%") %
556 builtinCode %
557 tflite::BuiltinOperator_MAX %
558 subgraphIndex %
559 operatorIndex %
560 CHECK_LOCATION().AsString()));
561 }
562
563 // lookup and call the parser function
564 auto & parserFunction = m_ParserFunctions[builtinCode];
565 (this->*parserFunction)(subgraphIndex, operatorIndex);
566 }
567 catch (const ParseException& e)
568 {
569 failedToCreate = true;
570 std::stringstream errorString;
571
572 errorString << "Failed to parse operator #" << operatorIndex
573 << " within subgraph #" << subgraphIndex
574 << " error: " << e.what();
575 BOOST_LOG_TRIVIAL(error) << errorString.str();
576
577 errors << errorString.str() << "\n";
578 }
579 ++operatorIndex;
580 }
581
582 SetupInputLayers(subgraphIndex);
583 SetupOutputLayers(subgraphIndex);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -0200584 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100585
586 ++subgraphIndex;
587 }
588
589 if (failedToCreate)
590 {
591 // we can skip everything and let the outer exception handler deal with the error
592 throw ParseException(errors.str());
593 }
594
595 // establish the connections from the layer outputs to the inputs of the subsequent layers
596 for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
597 {
598 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
599 {
600 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
601 {
602 for (size_t inputSlotIdx = 0;
603 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
604 ++inputSlotIdx)
605 {
606 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
607 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
608 }
609 }
610 }
611 }
612
613 return std::move(m_Network);
614}
615
616void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
617 size_t tensorIndex,
618 armnn::IOutputSlot* slot)
619{
620 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
621 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
622 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
623
624 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
625
626 // assuming there is only one producer for that tensor
627 if (tensorSlots.outputSlot != nullptr)
628 {
629 throw ParseException(boost::str(
630 boost::format("Another layer has already registered itself as the producer of "
631 "subgraph:%1% tensor:%2% %3%") %
632 subgraphIndex %
633 tensorIndex %
634 CHECK_LOCATION().AsString()));
635 }
636
637 tensorSlots.outputSlot = slot;
638}
639
640void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
641 size_t tensorIndex,
642 armnn::IInputSlot* slot)
643{
644 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
645 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
646 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
647
648 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
649 tensorSlots.inputSlots.push_back(slot);
650}
651
652void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
653{
654 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
655 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
657 auto opcodeIndex = operatorPtr->opcode_index;
658 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
659
660 throw ParseException(
661 boost::str(
662 boost::format("Operator not supported. "
663 "subgraph:%1% operator:%2% "
664 "opcode_index:%3% opcode:%4% / %5% %6%") %
665 subgraphIndex %
666 operatorIndex %
667 opcodeIndex %
668 opcode %
669 tflite::EnumNameBuiltinOperator(opcode) %
670 CHECK_LOCATION().AsString()));
671}
672
telsoa01c577f2c2018-08-31 09:22:23 +0100673void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
674{
675 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
676
677 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
678 const auto * options = operatorPtr->builtin_options.AsConv2DOptions();
679
680 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
681
682 Convolution2dDescriptor desc;
683 desc.m_BiasEnabled = false;
684 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
685 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000686 desc.m_DataLayout = armnn::DataLayout::NHWC;
telsoa01c577f2c2018-08-31 09:22:23 +0100687
688 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
689 CHECK_VALID_SIZE(inputs.size(), 2, 3);
690
691 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
692 CHECK_VALID_SIZE(outputs.size(), 1);
693
694 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
695 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
696
697 // assuming input is NHWC
698 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
699 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
700
701 // assuming the filter is OHWI : Output, H, W, Input
702 // which is essentially the same as NHWC
703 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
704 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
705
706 CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
707 CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
708
Matteo Martincigh747ef822018-12-18 09:26:39 +0000709 auto filterTensorAndData = CreateConstTensor(inputs[1],
710 filterTensorInfo,
711 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100712 armnn::IConnectableLayer* layer;
713
714 auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
715
716 if (inputs.size() == 3)
717 {
718 desc.m_BiasEnabled = true;
719 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000720 auto biasTensorAndData = CreateConstTensor(inputs[2],
721 biasTensorInfo,
722 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100723 layer = m_Network->AddConvolution2dLayer(desc,
724 filterTensorAndData.first,
725 biasTensorAndData.first,
726 layerName.c_str());
727 }
728 else
729 {
730 layer = m_Network->AddConvolution2dLayer(desc,
731 filterTensorAndData.first,
732 layerName.c_str());
733 }
734
735 BOOST_ASSERT(layer != nullptr);
736
telsoa01c577f2c2018-08-31 09:22:23 +0100737 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000738 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100739
740 // register the input connection slots for the layer, connections are made after all layers have been created
741 // only the tensors for the inputs are relevant, exclude the const tensors
742 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000743 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100744
jimfly01c25411c2018-11-14 17:47:22 +0000745 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100746 // register the output connection slots for the layer, connections are made after all layers have been created
747 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
748 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
749}
750
751void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
752{
753 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
754
755 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
756 const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
757
758 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
759
760 DepthwiseConvolution2dDescriptor desc;
761 desc.m_BiasEnabled = false;
762 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
763 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000764 desc.m_DataLayout = armnn::DataLayout::NHWC;
telsoa01c577f2c2018-08-31 09:22:23 +0100765 // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
766 CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);
767
768 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
769 CHECK_VALID_SIZE(inputs.size(), 2, 3);
770 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
771 CHECK_VALID_SIZE(outputs.size(), 1);
772
773 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
774 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
775
Matteo Martincigh747ef822018-12-18 09:26:39 +0000776 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +0100777 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
778 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000779
780 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +0100781 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
782 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
783
Matteo Martincigh747ef822018-12-18 09:26:39 +0000784 // Reshape weights as [ H, W, I, M ]
785 filterTensorInfo.SetShape({ filterHeight,
786 filterWidth,
787 inputTensorInfo.GetShape()[3],
788 filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });
789
790 // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
791 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
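    // Illustrative example: with I=8 input channels, M=1 and a 3x3 kernel, the
    // TfLite weights of shape [1,3,3,8] are reshaped to [3,3,8,1] above and then
    // permuted to [1,8,3,3] when the constant tensor is created below.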
792
telsoa01c577f2c2018-08-31 09:22:23 +0100793 CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
794 CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
795
Matteo Martincigh747ef822018-12-18 09:26:39 +0000796 auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100797 armnn::IConnectableLayer* layer;
798 auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
799
800 if (inputs.size() == 3)
801 {
802 desc.m_BiasEnabled = true;
803 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000804 auto biasTensorAndData = CreateConstTensor(inputs[2],
805 biasTensorInfo,
806 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100807 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
808 filterTensorAndData.first,
809 biasTensorAndData.first,
810 layerName.c_str());
811 }
812 else
813 {
814 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
815 filterTensorAndData.first,
816 layerName.c_str());
817 }
818 BOOST_ASSERT(layer != nullptr);
819
telsoa01c577f2c2018-08-31 09:22:23 +0100820 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000821 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100822
823 // register the input connection slots for the layer, connections are made after all layers have been created
824 // only the tensors for the inputs are relevant, exclude the const tensors
825 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000826 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100827
jimfly01c25411c2018-11-14 17:47:22 +0000828 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100829 // register the output connection slots for the layer, connections are made after all layers have been created
830 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
831 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
832}
833
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100834void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
835{
836 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
837}
838
839void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
840{
841 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
842}
843
844void TfLiteParser::ParsePool(size_t subgraphIndex,
845 size_t operatorIndex,
846 PoolingAlgorithm algorithm)
847{
848 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
849
850 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
851 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
852
853 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
854
855 std::string layerName;
856
857 switch (algorithm)
858 {
859 case PoolingAlgorithm::Average:
860 layerName =
861 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
862 break;
863 case PoolingAlgorithm::Max:
864 layerName =
865 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
866 break;
867 default:
868 BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
869 }
870
871 Pooling2dDescriptor desc;
872
873 desc.m_PoolType = algorithm;
874 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
875 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
876 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
877 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
878 desc.m_PaddingMethod = PaddingMethod::Exclude;
879 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +0000880 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100881
882 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
883 CHECK_VALID_SIZE(inputs.size(), 1);
884 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
885
886 // assuming input is NHWC
887 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
888 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
889
890 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
891 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
892
893 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
894 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100895
896 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
897
898 BOOST_ASSERT(layer != nullptr);
899
jimfly01c25411c2018-11-14 17:47:22 +0000900 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
901 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100902
903 // register the input connection slots for the layer, connections are made after all layers have been created
904 // only the tensors for the inputs are relevant, exclude the const tensors
905 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000906 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100907
jimfly01c25411c2018-11-14 17:47:22 +0000908 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100909 // register the output connection slots for the layer, connections are made after all layers have been created
910 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
911 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
912}
913
telsoa01c577f2c2018-08-31 09:22:23 +0100914void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
915{
916 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
917 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
918 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
919
920 SoftmaxDescriptor desc;
921 desc.m_Beta = options->beta;
922
923 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
924 CHECK_VALID_SIZE(inputs.size(), 1);
925 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
926 CHECK_VALID_SIZE(outputs.size(), 1);
927
928 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
929 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
930
931 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
932 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
933
934 // register the input connection slots for the layer, connections are made after all layers have been created
935 // only the tensors for the inputs are relevant, exclude the const tensors
936 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
937 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
938
939 // register the output connection slots for the layer, connections are made after all layers have been created
940 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
941 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
942}
943
944armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
945 const armnn::TensorInfo & inputTensorInfo)
946{
947 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
948 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
949 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
950
951 if (inputTensorInfo.GetNumDimensions() > 4)
952 {
953 std::stringstream ss;
954 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
955 << " shape:" << inputTensorInfo.GetShape() << " "
956 << CHECK_LOCATION().AsString();
957 throw ParseException(ss.str());
958 }
959
960 if (squeezeDims.empty())
961 {
962 squeezeDims.assign(dimensionSequence,
963 dimensionSequence+inputTensorInfo.GetNumDimensions());
964 }
965
966 std::vector<uint32_t> outputDims;
967 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
968 {
969 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
970 auto currentDimension = inputTensorInfo.GetShape()[i];
971 if (skipSqueeze || currentDimension != 1)
972 {
973 outputDims.push_back(currentDimension);
974 }
975 }
976
977 if (outputDims.size() > 4)
978 {
979 std::stringstream ss;
        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
           << " for input shape:" << inputTensorInfo.GetShape() << " "
982 << CHECK_LOCATION().AsString();
983 throw ParseException(ss.str());
984 }
985
986 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
987 outputDims.data());
988
989 // we need to preserve the tensor type and the quantization data as well
990 TensorInfo outTensorInfo = inputTensorInfo;
991 outTensorInfo.SetShape(outShape);
992
993 return outTensorInfo;
994}
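// Illustrative example: squeezing an input of shape [1,2,1,3] with an empty
// squeeze_dims list removes every dimension of size 1, giving [2,3]; with
// squeeze_dims = {0} only the leading dimension is removed, giving [2,1,3].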
995
996void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
997{
998 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
999
1000 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1001 CHECK_VALID_SIZE(inputs.size(), 1);
1002
1003 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1004 CHECK_VALID_SIZE(outputs.size(), 1);
1005
1006 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1007 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1008
1009 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1010 armnn::TensorInfo outputTensorInfo =
1011 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1012 inputTensorInfo);
1013
1014 ReshapeDescriptor reshapeDesc;
1015 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1016
1017 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1018 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1019 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1020
1021 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1022 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1023
1024 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1025 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1026}
1027
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001028void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1029{
1030 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1031
1032 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1033 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1034
1035 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1036 CHECK_VALID_SIZE(inputs.size(), 2);
1037
1038 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1039 CHECK_VALID_SIZE(outputs.size(), 1);
1040
1041 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1042 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1043
1044 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1045 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1046
1047 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1048 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1049
1050 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1051 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1052 {
1053 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1054 }
1055 else
1056 {
1057 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1058 }
1059
1060 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1061
1062 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1063 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1064}
1065
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001066void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1067{
1068 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1069
1070 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1071 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1072
1073 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1074 CHECK_VALID_SIZE(inputs.size(), 2);
1075
1076 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1077 CHECK_VALID_SIZE(outputs.size(), 1);
1078
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001079 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1080 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1081
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001082 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1083 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1084
1085 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1086 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1087
1088 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001089 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1090 {
1091 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1092 }
1093 else
1094 {
1095 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1096 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001097
1098 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1099
1100 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1101 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1102}
1103
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001104void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1105{
1106 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1107
1108 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1109 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1110
1111 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1112 CHECK_VALID_SIZE(inputs.size(), 2);
1113
1114 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1115 CHECK_VALID_SIZE(outputs.size(), 1);
1116
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001117 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1118 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1119
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001120 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1121 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1122
1123 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1124 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1125
1126 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001127 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1128 {
1129 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1130 }
1131 else
1132 {
1133 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1134 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001135
1136 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1137
1138 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1139 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1140}
1141
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001142void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1143{
1144 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1145
1146 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1147
1148 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1149 CHECK_VALID_SIZE(outputs.size(), 1);
1150
1151 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1152 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1153
1154 armnn::MeanDescriptor desc;
1155 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1156 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1157 desc.m_Axis = axis;
1158
1159 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1160 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1161
    desc.m_KeepDims =
        inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1165
1166 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1167 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1168
1169 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1170
1171 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1172 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1173
1174 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1175 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1176}
1177
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001178void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1179{
1180 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1181
1182 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1183
1184 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1185 CHECK_VALID_SIZE(outputs.size(), 1);
1186
1187 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1188 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1189
1190 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1191 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1192
1193 size_t step = 2;
1194 armnn::PadDescriptor desc;
1195 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1196 {
1197 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1198 }
1199
1200 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1201 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1202
1203 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1204 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1205
1206 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1207 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1208
1209 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1210 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1211}
1212
Finn Williamsc42c3842019-01-22 14:18:11 +00001213
Sadik Armagan58f39192018-09-17 14:14:39 +01001214void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1215{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001217}
1218
1219void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1220{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::BoundedReLu);
1222}
Sadik Armagan58f39192018-09-17 14:14:39 +01001223
Finn Williamsc42c3842019-01-22 14:18:11 +00001224void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1225{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Sigmoid);
1227}
1228
1229
1230void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1231{
1232 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001233 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1234 boost::ignore_unused(operatorPtr);
1235
1236 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1237 CHECK_VALID_SIZE(inputs.size(), 1);
1238
1239 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1240 CHECK_VALID_SIZE(outputs.size(), 1);
1241
Finn Williamsc42c3842019-01-22 14:18:11 +00001242 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001243 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001244 activationDesc.m_Function = activationType;
1245
1246 switch (activationType)
1247 {
1248 case ActivationFunction::ReLu:
1249 {
1250 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1251 break;
1252 }
1253 case ActivationFunction::BoundedReLu:
1254 {
1255 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1256 activationDesc.m_A = 6.0f;
1257 activationDesc.m_B = 0.0f;
1258 break;
1259 }
1260 case ActivationFunction::Sigmoid:
1261 {
1262 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1263 break;
1264 }
1265 default:
1266 {
1267 throw ParseException(
1268 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1269 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1270 }
1271 }
1272
1273 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001274
1275 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1276 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1277
1278 // register the input connection slots for the layer, connections are made after all layers have been created
1279 // only the tensors for the inputs are relevant, exclude the const tensors
1280 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1281 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1282
1283 // register the output connection slots for the layer, connections are made after all layers have been created
1284 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1285 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1286}
Sadikb94967b2018-09-19 15:30:00 +01001287armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1288 const std::vector<int32_t> & targetDimsIn)
1289{
1290 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1291 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1292
1293 if (stretchDim != targetDimsIn.end())
1294 {
1295 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1296 {
1297 throw ParseException(
1298 boost::str(
1299 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1300 }
1301
1302 auto targetNumElements =
1303 boost::numeric_cast<unsigned int>(
1304 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1305
1306 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1307 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1308 }
1309
1310 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1311
1312 TensorInfo reshapeInfo = inputTensorInfo;
1313 reshapeInfo.SetShape(outputShape);
1314
1315 return reshapeInfo;
1316}
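// Illustrative example: for an input with 8 elements and new_shape = {-1, 4},
// the accumulated product (including the initial -1) is 4, so the stretch
// dimension becomes 8 / 4 = 2 and the output shape is [2,4].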
1317
1318void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1319{
1320 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1321
1322 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001323
1324 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1325 CHECK_VALID_SIZE(outputs.size(), 1);
1326
1327 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1328 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1329
1330 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001331 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1332 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001333 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1334
kevmay0171972a82018-12-17 14:28:03 +00001335 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001336 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1337 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001338 {
1339 std::stringstream ss;
1340 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001341 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001342 << " does not equal output shape "
1343 << actualOutputTensorInfo.GetShape()
1344 << ": "
1345 << CHECK_LOCATION().AsString();
1346 throw ParseException(ss.str());
1347 }
1348
Sadikb94967b2018-09-19 15:30:00 +01001349 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001350 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001351
1352 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1353 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001354 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001355
1356 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1357 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1358
1359 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1360 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1361}
1362
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001363void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1364{
1365 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1366
1367 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1368 CHECK_VALID_SIZE(inputs.size(), 2);
1369
1370 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1371 CHECK_VALID_SIZE(outputs.size(), 1);
1372
1373 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1374
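    // The second input holds the requested output size as a constant 1-D int32 tensor of
    // {new_height, new_width}; its data is copied out of the model buffer below.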
1375 // Data for the parsed tensor args (size) must be stored locally.
1376 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1377
1378 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1379 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1380
1381 ResizeBilinearDescriptor desc;
1382 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
1383 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1384 desc.m_DataLayout = armnn::DataLayout::NHWC;
1385
1386 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
1387 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
1388
1389 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1390 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1391
1392 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1393 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1394
1395 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1396 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1397}
1398
Sadik Armagan479045b2018-10-01 11:51:37 +01001399void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1400{
1401 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1402
1403 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1404 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1405
1406 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1407
1408 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1409 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1410 CHECK_VALID_SIZE(outputs.size(), 1);
1411
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001412 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1413 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001414
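    // TFLite allows a negative concatenation axis; normalise it into [0, inputRank), e.g. axis -1 on
    // rank-4 inputs maps to dimension 3.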
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001415 const unsigned int concatDimInput = static_cast<unsigned int>(
1416 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01001417
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001418 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1419 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001420
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001421 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001422
1423 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1424 {
1425 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1426
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001427 // This sets up the concatDescriptor view origin
1428 armnnUtils::ProcessConcatInputTensorInfo(
1429 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001430 }
1431
1432 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
1433 IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());
1434
1435 BOOST_ASSERT(layer != nullptr);
1436
1437 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1438 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001439
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001440 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001441
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001442 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001443
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001444 // add fused activation layer
1445 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001446
Sadik Armagan479045b2018-10-01 11:51:37 +01001447 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1448 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1449}
1450
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001451void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1452{
1453 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1454
1455 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1456 const auto options = operatorPtr->builtin_options.AsFullyConnectedOptions();
1457
1458 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1459
1460 FullyConnectedDescriptor desc;
1461 desc.m_BiasEnabled = false;
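    // TFLite supplies fully connected weights in [outputSize, inputSize] order, which is why the
    // weight matrix is marked as transposed here.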
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001462 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001463
1464 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1465 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1466 CHECK_VALID_SIZE(outputs.size(), 1);
1467
1468 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1469
1470 // The Fully Connected layer accepts a two-dimensional weights input
1471 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1472 if (weightsDimension != 2)
1473 {
1474 throw ParseException(
1475 boost::str(
1476 boost::format(
1477 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1478 "Node %2%")
1479 % weightsDimension
1480 % CHECK_LOCATION().AsString()));
1481 }
1482
Matteo Martincigh747ef822018-12-18 09:26:39 +00001483 auto filterTensorAndData = CreateConstTensor(inputs[1],
1484 filterTensorInfo,
1485 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001486 armnn::IConnectableLayer* layer;
1487 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1488
1489 if (inputs.size() == 3)
1490 {
1491 desc.m_BiasEnabled = true;
1492 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001493 auto biasTensorAndData = CreateConstTensor(inputs[2],
1494 biasTensorInfo,
1495 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001496 layer = m_Network->AddFullyConnectedLayer(desc,
1497 filterTensorAndData.first,
1498 biasTensorAndData.first,
1499 layerName.c_str());
1500 }
1501 else
1502 {
1503 layer = m_Network->AddFullyConnectedLayer(desc,
1504 filterTensorAndData.first,
1505 layerName.c_str());
1506 }
1507 BOOST_ASSERT(layer != nullptr);
1508
1509 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1510 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1511
1512 // register the input connection slot for the layer
1513 // only the tensors for the inputs are relevant, exclude the const tensors
1514 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1515 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1516
1517 // we need to add the activation layer and fortunately we don't need to care about the data layout
1518 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1519 options->fused_activation_function);
1520 // register the output connection slots for the layer, connections are made after all layers have been created
1521 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1522 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1523}
1524
keidav011b3e2ea2019-02-21 10:07:37 +00001525void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1526{
1527 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1528
1529 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1530
1531 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1532 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
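    // The TFLite DetectionPostProcess custom operator has four outputs: detection boxes, detection
    // classes, detection scores and the number of valid detections.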
1533 CHECK_VALID_SIZE(outputs.size(), 4);
1534
1535 // Obtain custom options from flexbuffers
1536 auto custom_options = operatorPtr->custom_options;
1537 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1538
1539 // Obtain descriptor information from TfLite
1540 DetectionPostProcessDescriptor desc;
1541 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1542 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1543 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1544 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1545 desc.m_NumClasses = m["num_classes"].AsUInt32();
1546 desc.m_ScaleH = m["h_scale"].AsFloat();
1547 desc.m_ScaleW = m["w_scale"].AsFloat();
1548 desc.m_ScaleX = m["x_scale"].AsFloat();
1549 desc.m_ScaleY = m["y_scale"].AsFloat();
1550
1551 if (!(m["use_regular_non_max_suppression"].IsNull()))
1552 {
1553 desc.m_UseRegularNms = m["use_regular_non_max_suppression"].AsBool();
1554 }
1555 if (!(m["detections_per_class"].IsNull()))
1556 {
1557 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1558 }
1559
1560 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1561 {
1562 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1563 "must be positive and less than or equal to 1.");
1564 }
1565
1566 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
1567 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
1568 armnn::Optional<armnn::PermutationVector&>());
1569
1570 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
1571 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
1572 layerName.c_str());
1573
1574 BOOST_ASSERT(layer != nullptr);
1575
1576 // Register outputs
1577 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
1578 {
1579 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[i]);
1580 layer->GetOutputSlot(i).SetTensorInfo(outputTensorInfo);
1581 }
1582
1583 // Register the input connection slots for the layer, connections are made after all layers have been created
1584 // only the tensors for the inputs are relevant, exclude the const tensors
1585 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1586 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1587
1588 // Register the output connection slots for the layer, connections are made after all layers have been created
1589 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1590 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
1591 outputTensorIndexes[1],
1592 outputTensorIndexes[2],
1593 outputTensorIndexes[3]});
1594}
1595
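// Translates a TFLite fused activation into a standalone ArmNN activation layer connected to the
// given output slot of prevLayer. When no activation is requested, prevLayer is returned unchanged.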
Sadik Armagan58f39192018-09-17 14:14:39 +01001596armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
1597 unsigned int outputSlot,
1598 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01001599{
1600 ActivationDescriptor activationDesc;
1601 std::string layerName = prevLayer->GetName();
1602
1603 switch(activationType)
1604 {
1605 case tflite::ActivationFunctionType_NONE:
1606 {
1607 // this is a no-op: return previous layer
1608 return prevLayer;
1609 }
1610 case tflite::ActivationFunctionType_RELU:
1611 {
1612 activationDesc.m_Function = ActivationFunction::ReLu;
1613 layerName += ":RELU";
1614 break;
1615 }
1616 case tflite::ActivationFunctionType_RELU6:
1617 {
1618 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1619 activationDesc.m_A = 6.0f;
1620 activationDesc.m_B = 0.0f;
1621 layerName += ":RELU6";
1622 break;
1623 }
1624 case tflite::ActivationFunctionType_TANH:
1625 {
1626 activationDesc.m_Function = ActivationFunction::TanH;
1627 activationDesc.m_A = 1.0f;
1628 activationDesc.m_B = 1.0f;
1629 layerName += ":TANH";
1630 break;
1631 }
1632
1633 // I only put these here as a reminder of what others we could support
1634 case tflite::ActivationFunctionType_RELU_N1_TO_1:
1635 case tflite::ActivationFunctionType_SIGN_BIT:
1636 default:
1637 {
1638 throw ParseException(
1639 boost::str(
1640 boost::format("TfLite parser doesn't support fused activation: "
1641 "%1%/%2% %3% ") %
1642 activationType %
1643 tflite::EnumNameActivationFunctionType(activationType) %
1644 CHECK_LOCATION().AsString()));
1645
1646 }
1647 }
1648
1649 IConnectableLayer* activationLayer =
1650 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1651
1652 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
1653 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
1654 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
1655 return activationLayer;
1656}
1657
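// Reads the whole .tflite file into memory and hands it to LoadModelFromBinary, which verifies the
// flatbuffer and unpacks it into the generated object API representation.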
1658TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
1659{
1660 if (fileName == nullptr)
1661 {
1662 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
1663 CHECK_LOCATION().AsString()));
1664 }
1665 boost::system::error_code errorCode;
1666 boost::filesystem::path pathToFile(fileName);
1667 if (!boost::filesystem::exists(pathToFile, errorCode))
1668 {
1669 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
1670 fileName %
1671 errorCode %
1672 CHECK_LOCATION().AsString()));
1673 }
1674 std::ifstream file(fileName, std::ios::binary);
1675 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
1676 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
1677 fileContent.size());
1678}
1679
1680TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
1681{
1682 if (binaryContent == nullptr)
1683 {
1684 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
1685 CHECK_LOCATION().AsString()));
1686 }
1687 flatbuffers::Verifier verifier(binaryContent, len);
1688 if (verifier.VerifyBuffer<tflite::Model>() == false)
1689 {
1690 throw ParseException(
1691 boost::str(boost::format("Buffer doesn't conform to the expected TensorFlow Lite "
1692 "flatbuffers format. size:%1% %2%") %
1693 len %
1694 CHECK_LOCATION().AsString()));
1695 }
1696 return tflite::UnPackModel(binaryContent);
1697}
1698
1699TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
1700 size_t subgraphIndex,
1701 size_t operatorIndex)
1702{
1703 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1704
1705 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1706 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1707
1708 size_t inputCount = operatorPtr->inputs.size();
1709 TensorRawPtrVector result(inputCount);
1710 for (size_t i=0; i<inputCount; ++i)
1711 {
1712 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
1713 result[i] = subGraphPtr->tensors[inputId].get();
1714 }
1715 return result;
1716}
1717
1718TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
1719 size_t subgraphIndex,
1720 size_t operatorIndex)
1721{
1722 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1723
1724 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1725 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1726
1727 size_t outputCount = operatorPtr->outputs.size();
1728 TensorRawPtrVector result(outputCount);
1729 for (size_t i=0; i<outputCount; ++i)
1730 {
1731 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
1732 CHECK_TENSOR(model, subgraphIndex, outputId);
1733 result[i] = subGraphPtr->tensors[outputId].get();
1734 }
1735 return result;
1736}
1737
1738TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
1739 size_t subgraphIndex)
1740{
1741 CHECK_SUBGRAPH(model, subgraphIndex);
1742 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1743
1744 size_t inputCount = subGraphPtr->inputs.size();
1745 TensorIdRawPtrVector result(inputCount);
1746 for (size_t i=0; i<inputCount; ++i)
1747 {
1748 uint32_t inputId = CHECKED_NON_NEGATIVE(subGraphPtr->inputs[i]);
1749 CHECK_TENSOR(model, subgraphIndex, inputId);
1750 result[i] = std::make_pair(inputId, subGraphPtr->tensors[inputId].get());
1751 }
1752 return result;
1753}
1754
1755TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
1756 size_t subgraphIndex)
1757{
1758 CHECK_SUBGRAPH(model, subgraphIndex);
1759 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1760
1761 size_t outputCount = subGraphPtr->outputs.size();
1762 TensorIdRawPtrVector result(outputCount);
1763 for (size_t i=0; i<outputCount; ++i)
1764 {
1765 uint32_t outputId = CHECKED_NON_NEGATIVE(subGraphPtr->outputs[i]);
1766 result[i] = std::make_pair(outputId, subGraphPtr->tensors[outputId].get());
1767 }
1768 return result;
1769}
1770
1771std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
1772 size_t subgraphIndex,
1773 size_t operatorIndex)
1774{
1775 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1776 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1777 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1778 return operatorPtr->inputs;
1779}
1780
1781std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
1782 size_t subgraphIndex,
1783 size_t operatorIndex)
1784{
1785 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1786 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1787 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1788 return operatorPtr->outputs;
1789}
1790
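// Records which tensor feeds each of the layer's input slots. The actual slot-to-slot connections
// are made later, once every layer of the subgraph has been created.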
1791void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
1792 size_t operatorIndex,
1793 IConnectableLayer* layer,
1794 const std::vector<unsigned int>& tensorIndexes)
1795{
1796 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1797 BOOST_ASSERT(layer != nullptr);
1798 if (tensorIndexes.size() != layer->GetNumInputSlots())
1799 {
1800 throw ParseException(
1801 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
1802 " for subgraph:%3% operator index:%4% %5%") %
1803 tensorIndexes.size() %
1804 layer->GetNumInputSlots() %
1805 subgraphIndex %
1806 operatorIndex %
1807 CHECK_LOCATION().AsString()));
1808 }
1809
1810 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
1811 {
1812 unsigned int tensorIndex = tensorIndexes[slotIndex];
1813 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
1814 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
1815 }
1816}
1817
1818void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
1819 size_t operatorIndex,
1820 IConnectableLayer* layer,
1821 const std::vector<unsigned int>& tensorIndexes)
1822{
1823 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1824 BOOST_ASSERT(layer != nullptr);
1825 if (tensorIndexes.size() != layer->GetNumOutputSlots())
1826 {
1827 throw ParseException(
1828 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
1829 " for subgraph:%3% operator index:%4% %5%") %
1830 tensorIndexes.size() %
1831 layer->GetNumOutputSlots() %
1832 subgraphIndex %
1833 operatorIndex %
1834 CHECK_LOCATION().AsString()));
1835 }
1836
1837 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1838 {
1839 unsigned int tensorIndex = tensorIndexes[slotIndex];
1840 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
1841 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
1842 }
1843}
1844
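// Adds an ArmNN input layer for every declared subgraph input, assigns it a binding id derived from
// the subgraph and tensor indices, and registers its output slot as the producer of that tensor.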
1845void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
1846{
1847 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1848
1849 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
1850 for (auto const & tensorIdAndPtr : inputs)
1851 {
1852 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1853 IConnectableLayer* layer =
1854 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1855
1856 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
1857 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1858
1859 RegisterOutputSlots(subgraphIndex,
1860 VIRTUAL_OPERATOR_ID,
1861 layer,
1862 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1863 }
1864}
1865
1866void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
1867{
1868 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1869
1870 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
1871 for (auto const & tensorIdAndPtr : outputs)
1872 {
1873 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1874 IConnectableLayer* layer =
1875 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1876
1877 RegisterInputSlots(subgraphIndex,
1878 VIRTUAL_OPERATOR_ID,
1879 layer,
1880 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1881 }
1882}
1883
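// Any tensor that has registered consumers but no producing layer at this point is assumed to be
// constant data embedded in the model, and is materialised as an ArmNN constant layer.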
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02001884void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
1885{
1886 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1887
1888 const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
1889 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
1890 {
1891 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
1892 {
1893 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
1894 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
1895 {
1896 TensorRawPtr tensorPtr = subGraphPtr->tensors[tensorIndex].get();
1897 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
1898 auto tensorAndData = CreateConstTensor(tensorPtr,
1899 tensorInfo,
1900 armnn::Optional<armnn::PermutationVector&>());
1901
1902 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
1903 IConnectableLayer *layer =
1904 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
1905
1906 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1907 RegisterOutputSlots(subgraphIndex,
1908 VIRTUAL_OPERATOR_ID,
1909 layer,
1910 { tensorIndex });
1911
1912 }
1913 }
1914 }
1915}
1916
telsoa01c577f2c2018-08-31 09:22:23 +01001917// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
1918TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
1919{
1920 CHECK_BUFFER(model, bufferIndex);
1921 return model->buffers[bufferIndex].get();
1922}
1923
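// Creates a ConstTensor from the model buffer and keeps the unpacked (and possibly permuted) copy of
// the data alive by moving it into a SupportedDataStorage, which must outlive any use of the tensor.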
Matteo Martincigh747ef822018-12-18 09:26:39 +00001924template<typename T>
1925std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
1926TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
1927 TfLiteParser::TensorRawPtr tensorPtr,
1928 armnn::TensorInfo& tensorInfo,
1929 armnn::Optional<armnn::PermutationVector&> permutationVector)
1930{
1931 auto constData = CreateConstTensorImpl<T>(bufferPtr,
1932 tensorPtr,
1933 tensorInfo,
1934 permutationVector);
1935 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
1936 return std::make_pair(constData.first, std::move(storage));
1937}
1938
telsoa01c577f2c2018-08-31 09:22:23 +01001939std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
1940TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00001941 armnn::TensorInfo& tensorInfo,
1942 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01001943{
1944 CHECK_TENSOR_PTR(tensorPtr);
1945 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
1946 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
1947
1948 switch (tensorInfo.GetDataType())
1949 {
1950 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001951 return CreateConstTensorAndStoreData<float>(bufferPtr,
1952 tensorPtr,
1953 tensorInfo,
1954 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001955 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001956 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
1957 tensorPtr,
1958 tensorInfo,
1959 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001960 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001961 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
1962 tensorPtr,
1963 tensorInfo,
1964 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001965 default:
1966 {
1967 std::stringstream errString;
1968 errString << "Unexpected datatype when creating const tensor: "
1969 << armnn::GetDataTypeName(tensorInfo.GetDataType())
1970 << " shape:" << tensorInfo.GetShape()
1971 << CHECK_LOCATION().AsString();
1972 throw ParseException(errString.str());
1973 }
1974 }
1975}
1976
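// Looks up a subgraph input tensor by name and returns its binding id paired with the tensor info;
// if no input with that name exists, a ParseException listing the available input names is thrown.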
1977BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
1978 const std::string& name) const
1979{
1980 CHECK_SUBGRAPH(m_Model, subgraphId);
1981 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
1982 for (auto const & input : inputs)
1983 {
1984 if (input.second->name == name)
1985 {
1986 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
1987 return std::make_pair(bindingId, ToTensorInfo(input.second));
1988 }
1989 }
1990
1991 std::stringstream bindings;
1992 for (auto const & input : inputs)
1993 {
1994 bindings << "'" << input.second->name << "' ";
1995 }
1996
1997 throw ParseException(
1998 boost::str(
1999 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2000 "Possible inputs are: [%3%] %4%") %
2001 subgraphId %
2002 name %
2003 bindings.str() %
2004 CHECK_LOCATION().AsString()));
2005}
2006
2007BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2008 const std::string& name) const
2009{
2010 CHECK_SUBGRAPH(m_Model, subgraphId);
2011 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2012 for (auto const & output : outputs)
2013 {
2014 if (output.second->name == name)
2015 {
2016 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
2017 return std::make_pair(bindingId, ToTensorInfo(output.second));
2018 }
2019 }
2020
2021 std::stringstream bindings;
2022 for (auto const & output : outputs)
2023 {
2024 bindings << "'" << output.second->name << "' ";
2025 }
2026
2027 throw ParseException(
2028 boost::str(
2029 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2030 "Possible outputs are: [%3%] %4%") %
2031 subgraphId %
2032 name %
2033 bindings.str() %
2034 CHECK_LOCATION().AsString()));
2035}
2036
2037size_t TfLiteParser::GetSubgraphCount() const
2038{
2039 return m_Model->subgraphs.size();
2040}
2041
2042std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2043{
2044 CHECK_SUBGRAPH(m_Model, subgraphId);
2045 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2046 std::vector<std::string> result;
2047 result.reserve(inputs.size());
2048 for (auto const & input : inputs)
2049 {
2050 result.push_back(input.second->name);
2051 }
2052 return result;
2053}
2054
2055std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2056{
2057 CHECK_SUBGRAPH(m_Model, subgraphId);
2058 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2059 std::vector<std::string> result;
2060 result.reserve(outputs.size());
2061 for (auto const & output : outputs)
2062 {
2063 result.push_back(output.second->name);
2064 }
2065 return result;
2066}
2067
2068ITfLiteParser* ITfLiteParser::CreateRaw()
2069{
2070 return new TfLiteParser();
2071}
2072
2073ITfLiteParserPtr ITfLiteParser::Create()
2074{
2075 return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
2076}
2077
2078void ITfLiteParser::Destroy(ITfLiteParser* parser)
2079{
2080 delete parser;
2081}
2082
2083TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
2084: m_FloatData(std::move(data))
2085, m_Uint8Data(nullptr)
2086, m_Int32Data(nullptr)
2087{
2088}
2089
2090TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2091: m_FloatData(nullptr)
2092, m_Uint8Data(std::move(data))
2093, m_Int32Data(nullptr)
2094{
2095}
2096
2097TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2098: m_FloatData(nullptr)
2099, m_Uint8Data(nullptr)
2100, m_Int32Data(std::move(data))
2101{
2102}
2103
2104} // armnnTfLiteParser