//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TfLiteParser.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>

// armnnUtils:
#include <ParserHelper.hpp>
#include <Permute.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>

#include <fstream>
#include <algorithm>
#include <limits>
#include <numeric>
#include <flatbuffers/flexbuffers.h>

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{
namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParser::ModelPtr & model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid operator index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // The model is not checked here because CHECK_MODEL is assumed to have
    // run already, so an assert is sufficient.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // Likewise, the subgraph index should already have been validated by
    // CHECK_MODEL, so only assert on it here.
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // The tensor index is the only value left to check here.
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                tensorIndex %
                location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
                    const CheckLocation & location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with a null tensor pointer. "
                              "at %2%") %
                location.m_Function %
                location.FileLine()));
    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParser::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "buffer:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid buffer index. "
                              "buffer index:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("The buffer #%1% is null. %2%") %
                bufferIndex %
                location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                bufferId %
                location.AsString()));
    }
    else if (tensorInfo.GetNumElements() > bufferPtr->data.size() ||
             tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)

std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto & i : in)
    {
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}

void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t temp = (outputSize - 1) * stride + filterSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}
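
// Worked example of the SAME padding rule above (values chosen for illustration only):
// with inputSize=10, filterSize=3, stride=2 the rounded-up outputSize is (10+2-1)/2 = 5,
// temp = (5-1)*2 + 3 = 11, so one extra element is needed and it is split as
// paddingFront = 0, paddingBack = 1.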

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                    tensorPtr->type %
                    tflite::EnumNameTensorType(tensorPtr->type) %
                    tensorPtr->name %
                    location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32
            // but this is what we support at the moment in ArmNN
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(tensorPtr->shape.size()),
                             dimensions.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}
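
// Illustration (values are examples, not taken from any particular model): a TfLite
// UINT8 tensor with quantization scale 0.0078125 and zero_point 128 maps to a
// QuantisedAsymm8 TensorInfo carrying that scale and offset, while a FLOAT32 tensor
// keeps the defaults of scale 0.0f and offset 0.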

template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
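
// The optional permutation covers weights whose TfLite layout differs from the one
// ArmNN expects; for example ParseDepthwiseConv2D below permutes [H, W, I, M]
// weights into ArmNN's [M, I, H, W] order with the permutation vector { 2, 3, 1, 0 }.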

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // generate the binding id by shifting the tensor id by 8 bit
    // and add the subgraph id, which allows 256 subgraphs
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}
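
// For example, tensor 5 of subgraph 0 gets binding id (5 << 8) + 0 = 1280, and the
// same tensor index in subgraph 1 would get 1281.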

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

} // <anonymous>

415
416TfLiteParser::TfLiteParser()
417: m_Network(nullptr, nullptr)
418, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
419{
420 // register supported operators
421 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
Sadik Armagan479045b2018-10-01 11:51:37 +0100422 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
telsoa01c577f2c2018-08-31 09:22:23 +0100423 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
424 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
keidav011b3e2ea2019-02-21 10:07:37 +0000425 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
Sadik Armagan8853c1f2018-10-22 09:04:18 +0100426 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
Finn Williamsc42c3842019-01-22 14:18:11 +0000427 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100428 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
Sadik Armagan58f39192018-09-17 14:14:39 +0100429 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
430 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
Sadikb94967b2018-09-19 15:30:00 +0100431 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
Sadik Armagan479045b2018-10-01 11:51:37 +0100432 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
433 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
Bruno Goncalvesbbeae262019-02-07 18:37:39 -0200434 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -0200435 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
Bruno Goncalvesf803f782018-12-18 13:40:30 -0200436 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
Bruno Goncalves2235cee2018-12-19 12:51:45 -0200437 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
Bruno Goncalves6c2355b2018-12-19 12:52:01 -0200438 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
telsoa01c577f2c2018-08-31 09:22:23 +0100439}
440
441void TfLiteParser::ResetParser()
442{
443 m_Network = armnn::INetworkPtr(nullptr, nullptr);
444 m_Model = nullptr;
445 m_SubgraphConnections.clear();
446}
447
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subGraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subGraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subGraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned int> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward(reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
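
// Illustration: when an elementwise operator combines a [4] tensor with a [2, 3, 4]
// tensor, the lower-rank input is re-expressed as [1, 1, 4] by the reshape layer
// created above, so both inputs reach the elementwise layer with the same rank and
// can be broadcast against each other.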

INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
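
// Typical call sequence from client code (a sketch based on the public parser API,
// not something defined in this file):
//     auto parser = armnnTfLiteParser::ITfLiteParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//     // ... then optimize the network and load it into an armnn::IRuntime as usual.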

INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubGraphPtr const & subgraph : m_Model->subgraphs)
    {
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                            builtinCode %
                            tflite::BuiltinOperator_MAX %
                            subgraphIndex %
                            operatorIndex %
                            CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
            boost::format("Another layer has already registered itself as the producer of "
                          "subgraph:%1% tensor:%2% %3%") %
            subgraphIndex %
            tensorIndex %
            CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    throw ParseException(
        boost::str(
            boost::format("Operator not supported. "
                          "subgraph:%1% operator:%2% "
                          "opcode_index:%3% opcode:%4% / %5% %6%") %
            subgraphIndex %
            operatorIndex %
            opcodeIndex %
            opcode %
            tflite::EnumNameBuiltinOperator(opcode) %
            CHECK_LOCATION().AsString()));
}

void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 biasTensorAndData.first,
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
    CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasTensorAndData.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}

void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}

void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();

    SoftmaxDescriptor desc;
    desc.m_Beta = options->beta;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
                                                     const armnn::TensorInfo & inputTensorInfo)
{
    CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
    std::vector<uint32_t> squeezeDims = squeezeDimsIn;
    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        std::stringstream ss;
        ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
           << " shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    if (squeezeDims.empty())
    {
        squeezeDims.assign(dimensionSequence,
                           dimensionSequence+inputTensorInfo.GetNumDimensions());
    }

    std::vector<uint32_t> outputDims;
    for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
    {
        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
        auto currentDimension = inputTensorInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    if (outputDims.size() > 4)
    {
        std::stringstream ss;
        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
           << " for input shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    // we need to preserve the tensor type and the quantization data as well
    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);

    return outTensorInfo;
}
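
// For example, squeezing a [1, 2, 2, 1] input with squeeze_dims { 0, 3 } (or with no
// dims given, which targets every axis) drops both size-1 axes and yields the output
// shape [2, 2]; the data type and quantization parameters are carried over unchanged.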

void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo outputTensorInfo =
        TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
                                           inputTensorInfo);

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSubOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsAddOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsMulOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::MeanDescriptor desc;
    std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
    ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
    desc.m_Axis = axis;

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();

    auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
    ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());

    size_t step = 2;
    armnn::PadDescriptor desc;
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::ReLu);
}

void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::BoundedReLu);
}

void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Sigmoid);
}

void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    boost::ignore_unused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = str(boost::format("Activation:"));
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
                                         " %2% ") % static_cast<int>(activationType) % CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
                                                     const std::vector<int32_t> & targetDimsIn)
{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                boost::str(
                    boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
        }

        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
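
// Worked example of the -1 ("stretch") handling above: for an input of 12 elements
// and new_shape { -1, 4 }, std::accumulate starts at -1 so the product is
// -1 * -1 * 4 = 4, and the stretch dimension becomes 12 / 4 = 3, giving [3, 4].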

void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);

    // Check for valid input size and that reshape parameters equal output shape
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

Sadik Armagan479045b2018-10-01 11:51:37 +01001362void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1363{
1364 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1365
1366 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1367 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1368
1369 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1370
1371 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1372 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1373 CHECK_VALID_SIZE(outputs.size(), 1);
1374
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001375 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1376 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001377
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001378 const unsigned int concatDimInput = static_cast<unsigned int>(
1379 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
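    // For example, with inputRank 3 an axis of -1 normalises to (3 + -1) % 3 = 2, i.e. concatenation
    // along the last dimension; a non-negative axis such as 2 maps to (3 + 2) % 3 = 2 as well.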
Sadik Armagan479045b2018-10-01 11:51:37 +01001380
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001381 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1382 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001383
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001384 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001385
1386 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1387 {
1388 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1389
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001390        // This sets up the concatDescriptor view origin for the current input tensor
1391 armnnUtils::ProcessConcatInputTensorInfo(
1392 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001393 }
1394
1395 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
1396 IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());
1397
1398 BOOST_ASSERT(layer != nullptr);
1399
1400 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1401 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001402
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001403 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001404
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001405 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001406
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001407 // add fused activation layer
1408 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001409
Sadik Armagan479045b2018-10-01 11:51:37 +01001410 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1411 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1412}
1413
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001414void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1415{
1416 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1417
 1418    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
 1419    const auto options = operatorPtr->builtin_options.AsFullyConnectedOptions();
1420
1421 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1422
1423 FullyConnectedDescriptor desc;
1424 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001425 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001426
1427 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1428 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1429 CHECK_VALID_SIZE(outputs.size(), 1);
1430
1431 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1432
 1433    // Fully Connected Layer accepts two-dimensional weights input
1434 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1435 if (weightsDimension != 2)
1436 {
1437 throw ParseException(
1438 boost::str(
1439 boost::format(
1440 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1441 "Node %2%")
1442 % weightsDimension
1443 % CHECK_LOCATION().AsString()));
1444 }
1445
Matteo Martincigh747ef822018-12-18 09:26:39 +00001446 auto filterTensorAndData = CreateConstTensor(inputs[1],
1447 filterTensorInfo,
1448 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001449 armnn::IConnectableLayer* layer;
1450 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1451
1452 if (inputs.size() == 3)
1453 {
1454 desc.m_BiasEnabled = true;
1455 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001456 auto biasTensorAndData = CreateConstTensor(inputs[2],
1457 biasTensorInfo,
1458 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001459 layer = m_Network->AddFullyConnectedLayer(desc,
1460 filterTensorAndData.first,
1461 biasTensorAndData.first,
1462 layerName.c_str());
1463 }
1464 else
1465 {
1466 layer = m_Network->AddFullyConnectedLayer(desc,
1467 filterTensorAndData.first,
1468 layerName.c_str());
1469 }
1470 BOOST_ASSERT(layer != nullptr);
1471
1472 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1473 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1474
1475 // register the input connection slot for the layer
1476 // only the tensors for the inputs are relevant, exclude the const tensors
1477 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1478 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1479
 1480    // Add the fused activation layer; an activation is layout-agnostic, so no data layout handling is needed here
1481 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1482 options->fused_activation_function);
1483 // register the output connection slots for the layer, connections are made after all layers have been created
1484 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1485 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1486}
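// Example mapping for ParseFullyConnected (hypothetical shapes): a FULLY_CONNECTED operator with
// input [1,1024], weights [1000,1024] and bias [1000] results in an AddFullyConnectedLayer call
// with m_TransposeWeightMatrix == true (TF Lite stores weights as [outputSize, inputSize]) and an
// output of shape [1,1000].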
1487
keidav011b3e2ea2019-02-21 10:07:37 +00001488void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1489{
1490 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1491
1492 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1493
1494 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1495 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1496 CHECK_VALID_SIZE(outputs.size(), 4);
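    // The TFLite_Detection_PostProcess custom operator is expected to produce four outputs:
    // detection boxes, detection classes, detection scores and the number of valid detections.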
1497
1498 // Obtain custom options from flexbuffers
1499 auto custom_options = operatorPtr->custom_options;
1500 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
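    // The custom options are a FlexBuffer-encoded map; an illustrative (hypothetical) example:
    //   { "max_detections": 10, "max_classes_per_detection": 1, "num_classes": 90,
    //     "nms_score_threshold": 0.5, "nms_iou_threshold": 0.6,
    //     "h_scale": 5.0, "w_scale": 5.0, "x_scale": 10.0, "y_scale": 10.0 }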
1501
 1502    // Obtain descriptor information from TF Lite
1503 DetectionPostProcessDescriptor desc;
1504 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1505 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1506 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1507 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1508 desc.m_NumClasses = m["num_classes"].AsUInt32();
1509 desc.m_ScaleH = m["h_scale"].AsFloat();
1510 desc.m_ScaleW = m["w_scale"].AsFloat();
1511 desc.m_ScaleX = m["x_scale"].AsFloat();
1512 desc.m_ScaleY = m["y_scale"].AsFloat();
1513
1514 if (!(m["use_regular_non_max_suppression"].IsNull()))
1515 {
1516 desc.m_UseRegularNms = m["use_regular_non_max_suppression"].AsBool();
1517 }
1518 if (!(m["detections_per_class"].IsNull()))
1519 {
1520 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1521 }
1522
1523 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1524 {
1525 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1526 "must be positive and less than or equal to 1.");
1527 }
1528
1529 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
1530 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
1531 armnn::Optional<armnn::PermutationVector&>());
1532
1533 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
1534 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
1535 layerName.c_str());
1536
1537 BOOST_ASSERT(layer != nullptr);
1538
1539 // Register outputs
1540 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
1541 {
1542 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i]);
1543 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
1544 }
1545
1546 // Register the input connection slots for the layer, connections are made after all layers have been created
1547 // only the tensors for the inputs are relevant, exclude the const tensors
1548 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1549 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1550
1551 // Register the output connection slots for the layer, connections are made after all layers have been created
1552 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1553 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
1554 outputTensorIndexes[1],
1555 outputTensorIndexes[2],
1556 outputTensorIndexes[3]});
1557}
1558
Sadik Armagan58f39192018-09-17 14:14:39 +01001559armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
1560 unsigned int outputSlot,
1561 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01001562{
1563 ActivationDescriptor activationDesc;
1564 std::string layerName = prevLayer->GetName();
1565
1566 switch(activationType)
1567 {
1568 case tflite::ActivationFunctionType_NONE:
1569 {
1570 // this is a no-op: return previous layer
1571 return prevLayer;
1572 }
1573 case tflite::ActivationFunctionType_RELU:
1574 {
1575 activationDesc.m_Function = ActivationFunction::ReLu;
1576 layerName += ":RELU";
1577 break;
1578 }
1579 case tflite::ActivationFunctionType_RELU6:
1580 {
1581 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1582 activationDesc.m_A = 6.0f;
1583 activationDesc.m_B = 0.0f;
1584 layerName += ":RELU6";
1585 break;
1586 }
1587 case tflite::ActivationFunctionType_TANH:
1588 {
1589 activationDesc.m_Function = ActivationFunction::TanH;
1590 activationDesc.m_A = 1.0f;
1591 activationDesc.m_B = 1.0f;
1592 layerName += ":TANH";
1593 break;
1594 }
1595
 1596        // These cases are listed only as a reminder of the other fused activation types that could be supported
1597 case tflite::ActivationFunctionType_RELU_N1_TO_1:
1598 case tflite::ActivationFunctionType_SIGN_BIT:
1599 default:
1600 {
1601 throw ParseException(
1602 boost::str(
 1603                boost::format("TfLite parser doesn't support fused activation: "
1604 "%1%/%2% %3% ") %
1605 activationType %
1606 tflite::EnumNameActivationFunctionType(activationType) %
1607 CHECK_LOCATION().AsString()));
1608
1609 }
1610 }
1611
1612 IConnectableLayer* activationLayer =
1613 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1614
1615 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
1616 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
1617 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
1618 return activationLayer;
1619}
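// example usage (as called from the Parse* functions above): the return value is either the new
// activation layer or, for ActivationFunctionType_NONE, prevLayer itself, so output slots should
// be registered on whatever is returned:
//   layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);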
1620
1621TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
1622{
1623 if (fileName == nullptr)
1624 {
1625 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
1626 CHECK_LOCATION().AsString()));
1627 }
1628 boost::system::error_code errorCode;
1629 boost::filesystem::path pathToFile(fileName);
1630 if (!boost::filesystem::exists(pathToFile, errorCode))
1631 {
1632 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
1633 fileName %
1634 errorCode %
1635 CHECK_LOCATION().AsString()));
1636 }
1637 std::ifstream file(fileName, std::ios::binary);
1638 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
1639 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
1640 fileContent.size());
1641}
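// example usage (the file name is hypothetical):
//   TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile("mobilenet_v1.tflite");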
1642
1643TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
1644{
1645 if (binaryContent == nullptr)
1646 {
1647 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
1648 CHECK_LOCATION().AsString()));
1649 }
1650 flatbuffers::Verifier verifier(binaryContent, len);
1651 if (verifier.VerifyBuffer<tflite::Model>() == false)
1652 {
1653 throw ParseException(
1654 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
1655 "flatbuffers format. size:%1% %2%") %
1656 len %
1657 CHECK_LOCATION().AsString()));
1658 }
1659 return tflite::UnPackModel(binaryContent);
1660}
1661
1662TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
1663 size_t subgraphIndex,
1664 size_t operatorIndex)
1665{
1666 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1667
1668 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1669 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1670
1671 size_t inputCount = operatorPtr->inputs.size();
1672 TensorRawPtrVector result(inputCount);
1673 for (size_t i=0; i<inputCount; ++i)
1674 {
1675 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
1676 result[i] = subGraphPtr->tensors[inputId].get();
1677 }
1678 return result;
1679}
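// example usage (mirrors the Parse* functions above):
//   auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
//   armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);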
1680
1681TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
1682 size_t subgraphIndex,
1683 size_t operatorIndex)
1684{
1685 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1686
1687 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1688 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1689
1690 size_t outputCount = operatorPtr->outputs.size();
1691 TensorRawPtrVector result(outputCount);
1692 for (size_t i=0; i<outputCount; ++i)
1693 {
1694 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
1695 CHECK_TENSOR(model, subgraphIndex, outputId);
1696 result[i] = subGraphPtr->tensors[outputId].get();
1697 }
1698 return result;
1699}
1700
1701TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
1702 size_t subgraphIndex)
1703{
1704 CHECK_SUBGRAPH(model, subgraphIndex);
1705 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1706
1707 size_t inputCount = subGraphPtr->inputs.size();
1708 TensorIdRawPtrVector result(inputCount);
1709 for (size_t i=0; i<inputCount; ++i)
1710 {
1711 uint32_t inputId = CHECKED_NON_NEGATIVE(subGraphPtr->inputs[i]);
1712 CHECK_TENSOR(model, subgraphIndex, inputId);
1713 result[i] = std::make_pair(inputId, subGraphPtr->tensors[inputId].get());
1714 }
1715 return result;
1716}
1717
1718TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
1719 size_t subgraphIndex)
1720{
1721 CHECK_SUBGRAPH(model, subgraphIndex);
1722 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1723
1724 size_t outputCount = subGraphPtr->outputs.size();
1725 TensorIdRawPtrVector result(outputCount);
1726 for (size_t i=0; i<outputCount; ++i)
1727 {
1728 uint32_t outputId = CHECKED_NON_NEGATIVE(subGraphPtr->outputs[i]);
1729 result[i] = std::make_pair(outputId, subGraphPtr->tensors[outputId].get());
1730 }
1731 return result;
1732}
1733
1734std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
1735 size_t subgraphIndex,
1736 size_t operatorIndex)
1737{
1738 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1739 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1740 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1741 return operatorPtr->inputs;
1742}
1743
1744std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
1745 size_t subgraphIndex,
1746 size_t operatorIndex)
1747{
1748 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1749 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1750 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1751 return operatorPtr->outputs;
1752}
1753
1754void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
1755 size_t operatorIndex,
1756 IConnectableLayer* layer,
1757 const std::vector<unsigned int>& tensorIndexes)
1758{
1759 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1760 BOOST_ASSERT(layer != nullptr);
1761 if (tensorIndexes.size() != layer->GetNumInputSlots())
1762 {
1763 throw ParseException(
1764 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
1765 " for subgraph:%3% operator index:%4% %5%") %
1766 tensorIndexes.size() %
1767 layer->GetNumInputSlots() %
1768 subgraphIndex %
1769 operatorIndex %
1770 CHECK_LOCATION().AsString()));
1771 }
1772
1773 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
1774 {
1775 unsigned int tensorIndex = tensorIndexes[slotIndex];
1776 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
1777 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
1778 }
1779}
1780
1781void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
1782 size_t operatorIndex,
1783 IConnectableLayer* layer,
1784 const std::vector<unsigned int>& tensorIndexes)
1785{
1786 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1787 BOOST_ASSERT(layer != nullptr);
1788 if (tensorIndexes.size() != layer->GetNumOutputSlots())
1789 {
1790 throw ParseException(
1791 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
1792 " for subgraph:%3% operator index:%4% %5%") %
1793 tensorIndexes.size() %
1794 layer->GetNumOutputSlots() %
1795 subgraphIndex %
1796 operatorIndex %
1797 CHECK_LOCATION().AsString()));
1798 }
1799
1800 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1801 {
1802 unsigned int tensorIndex = tensorIndexes[slotIndex];
1803 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
1804 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
1805 }
1806}
1807
1808void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
1809{
1810 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1811
1812 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
1813 for (auto const & tensorIdAndPtr : inputs)
1814 {
1815 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1816 IConnectableLayer* layer =
1817 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1818
1819 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
1820 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1821
1822 RegisterOutputSlots(subgraphIndex,
1823 VIRTUAL_OPERATOR_ID,
1824 layer,
1825 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1826 }
1827}
1828
1829void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
1830{
1831 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1832
1833 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
1834 for (auto const & tensorIdAndPtr : outputs)
1835 {
1836 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1837 IConnectableLayer* layer =
1838 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1839
1840 RegisterInputSlots(subgraphIndex,
1841 VIRTUAL_OPERATOR_ID,
1842 layer,
1843 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1844 }
1845}
1846
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02001847void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
1848{
1849 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1850
1851 const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
    // A tensor that has registered consumers but no producer in this subgraph must come from
    // a constant buffer, so create a ConstantLayer for it and register it as the producer.
1854 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
1855 {
1856 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
1857 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
1858 {
1859 TensorRawPtr tensorPtr = subGraphPtr->tensors[tensorIndex].get();
1860 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
1861 auto tensorAndData = CreateConstTensor(tensorPtr,
1862 tensorInfo,
1863 armnn::Optional<armnn::PermutationVector&>());
1864
1865 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
1866 IConnectableLayer *layer =
1867 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
1868
1869 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1870 RegisterOutputSlots(subgraphIndex,
1871 VIRTUAL_OPERATOR_ID,
1872 layer,
1873 { tensorIndex });
1874
1875 }
1876 }
1878}
1879
telsoa01c577f2c2018-08-31 09:22:23 +01001880// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
1881TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
1882{
1883 CHECK_BUFFER(model, bufferIndex);
1884 return model->buffers[bufferIndex].get();
1885}
1886
Matteo Martincigh747ef822018-12-18 09:26:39 +00001887template<typename T>
1888std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
1889TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
1890 TfLiteParser::TensorRawPtr tensorPtr,
1891 armnn::TensorInfo& tensorInfo,
1892 armnn::Optional<armnn::PermutationVector&> permutationVector)
1893{
1894 auto constData = CreateConstTensorImpl<T>(bufferPtr,
1895 tensorPtr,
1896 tensorInfo,
1897 permutationVector);
1898 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
1899 return std::make_pair(constData.first, std::move(storage));
1900}
1901
telsoa01c577f2c2018-08-31 09:22:23 +01001902std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
1903TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00001904 armnn::TensorInfo& tensorInfo,
1905 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01001906{
1907 CHECK_TENSOR_PTR(tensorPtr);
1908 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
1909 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
1910
1911 switch (tensorInfo.GetDataType())
1912 {
1913 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001914 return CreateConstTensorAndStoreData<float>(bufferPtr,
1915 tensorPtr,
1916 tensorInfo,
1917 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001918 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001919 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
1920 tensorPtr,
1921 tensorInfo,
1922 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001923 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001924 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
1925 tensorPtr,
1926 tensorInfo,
1927 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001928 default:
1929 {
1930 std::stringstream errString;
1931 errString << "Unexpected datatype when creating const tensor: "
1932 << armnn::GetDataTypeName(tensorInfo.GetDataType())
1933 << " shape:" << tensorInfo.GetShape()
1934 << CHECK_LOCATION().AsString();
1935 throw ParseException(errString.str());
1936 }
1937 }
1938}
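// example usage (as in ParseFullyConnected above, with no permutation applied):
//   auto filterTensorAndData = CreateConstTensor(inputs[1],
//                                                filterTensorInfo,
//                                                armnn::Optional<armnn::PermutationVector&>());
//   layer = m_Network->AddFullyConnectedLayer(desc, filterTensorAndData.first, layerName.c_str());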
1939
1940BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
1941 const std::string& name) const
1942{
1943 CHECK_SUBGRAPH(m_Model, subgraphId);
1944 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
1945 for (auto const & input : inputs)
1946 {
1947 if (input.second->name == name)
1948 {
1949 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
1950 return std::make_pair(bindingId, ToTensorInfo(input.second));
1951 }
1952 }
1953
1954 std::stringstream bindings;
1955 for (auto const & input : inputs)
1956 {
1957 bindings << "'" << input.second->name << "' ";
1958 }
1959
1960 throw ParseException(
1961 boost::str(
1962 boost::format("No input binding found for subgraph:%1% and name:%2%. "
1963 "Possible inputs are: [%3%] %4%") %
1964 subgraphId %
1965 name %
1966 bindings.str() %
1967 CHECK_LOCATION().AsString()));
1968}
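// example usage (the tensor name is hypothetical):
//   BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, "input");
//   armnn::TensorInfo inputTensorInfo = inputBinding.second;  // inputBinding.first is the binding id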
1969
1970BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
1971 const std::string& name) const
1972{
1973 CHECK_SUBGRAPH(m_Model, subgraphId);
1974 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
1975 for (auto const & output : outputs)
1976 {
1977 if (output.second->name == name)
1978 {
1979 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
1980 return std::make_pair(bindingId, ToTensorInfo(output.second));
1981 }
1982 }
1983
1984 std::stringstream bindings;
1985 for (auto const & output : outputs)
1986 {
1987 bindings << "'" << output.second->name << "' ";
1988 }
1989
1990 throw ParseException(
1991 boost::str(
1992 boost::format("No output binding found for subgraph:%1% and name:%2%. "
1993 "Possible outputs are: [%3%] %4%") %
1994 subgraphId %
1995 name %
1996 bindings.str() %
1997 CHECK_LOCATION().AsString()));
1998}
1999
2000size_t TfLiteParser::GetSubgraphCount() const
2001{
2002 return m_Model->subgraphs.size();
2003}
2004
2005std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2006{
2007 CHECK_SUBGRAPH(m_Model, subgraphId);
2008 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2009 std::vector<std::string> result;
2010 result.reserve(inputs.size());
2011 for (auto const & input : inputs)
2012 {
2013 result.push_back(input.second->name);
2014 }
2015 return result;
2016}
2017
2018std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2019{
2020 CHECK_SUBGRAPH(m_Model, subgraphId);
2021 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2022 std::vector<std::string> result;
2023 result.reserve(outputs.size());
2024 for (auto const & output : outputs)
2025 {
2026 result.push_back(output.second->name);
2027 }
2028 return result;
2029}
2030
2031ITfLiteParser* ITfLiteParser::CreateRaw()
2032{
2033 return new TfLiteParser();
2034}
2035
2036ITfLiteParserPtr ITfLiteParser::Create()
2037{
2038 return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
2039}
2040
2041void ITfLiteParser::Destroy(ITfLiteParser* parser)
2042{
2043 delete parser;
2044}
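// Minimal usage sketch of the public factory API (the file name is hypothetical):
//   armnnTfLiteParser::ITfLiteParserPtr parser = ITfLiteParser::Create();
//   armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");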
2045
2046TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
2047: m_FloatData(std::move(data))
2048, m_Uint8Data(nullptr)
2049, m_Int32Data(nullptr)
2050{
2051}
2052
2053TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2054: m_FloatData(nullptr)
2055, m_Uint8Data(std::move(data))
2056, m_Int32Data(nullptr)
2057{
2058}
2059
2060TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2061: m_FloatData(nullptr)
2062, m_Uint8Data(nullptr)
2063, m_Int32Data(std::move(data))
2064{
2065}
2066
2067} // armnnTfLiteParser