//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TfLiteParser.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>

// armnnUtils:
#include <ParserHelper.hpp>
#include <Permute.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>

#include <fstream>
#include <algorithm>
#include <limits>
#include <numeric>
#include <flatbuffers/flexbuffers.h>

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{
namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                              location.m_Function %
                              subgraphIndex %
                              location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                              location.m_Function %
                              subgraphIndex %
                              location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParser::ModelPtr & model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid operator index. "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // The model is not checked here because CHECK_MODEL is assumed to have
    // run already and verified it, so an assert is sufficient.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // Likewise the subgraph index should already have been checked by CHECK_MODEL,
    // so only an assert is added here.
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // The tensor index is the only thing left to check here.
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              tensorIndex %
                              location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
                    const CheckLocation & location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with a null tensor pointer. "
                              "at %2%") %
                              location.m_Function %
                              location.FileLine()));
    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParser::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "buffer:%2% at %3%") %
                              location.m_Function %
                              bufferIndex %
                              location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid buffer index. "
                              "buffer index:%2% at %3%") %
                              location.m_Function %
                              bufferIndex %
                              location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("The buffer #%1% is null. %2%") %
                bufferIndex %
                location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                bufferId %
                location.AsString()));
    }
    else if (tensorInfo.GetNumElements() > bufferPtr->data.size() ||
             tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                                  OPTION->fused_activation_function % \
                                  tflite::EnumNameActivationFunctionType(\
                                      OPTION->fused_activation_function) % \
                                  __func__ % \
                                  SUBGRAPH_INDEX % \
                                  OPERATOR_INDEX % \
                                  CHECK_LOCATION().FileLine())); \
        } \
    } while(false)


std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto & i : in)
    {
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}

void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t temp = (outputSize - 1) * stride + filterSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}
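
// Illustrative example for CalcPadding above (values chosen here, not taken from a model):
// with Padding_SAME, inputSize = 5, filterSize = 3, stride = 2:
// outputSize = (5 + 2 - 1) / 2 = 3, temp = (3 - 1) * 2 + 3 = 7, so 7 - 5 = 2 padding elements
// are needed, split as paddingFront = 1 and paddingBack = 1. With Padding_VALID both remain 0.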

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                                  tensorPtr->type %
                                  tflite::EnumNameTensorType(tensorPtr->type) %
                                  tensorPtr->name %
                                  location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32,
            // but this is what ArmNN supports at the moment.
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(tensorPtr->shape.size()),
                             dimensions.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}
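
// Illustrative example for ToTensorInfo above (values chosen here, not taken from a model):
// a TensorType_UINT8 tensor of shape [1, 224, 224, 3] with quantization scale 0.007843 and
// zero_point 128 becomes an armnn::TensorInfo with DataType::QuantisedAsymm8, the same shape,
// quantizationScale 0.007843 and quantizationOffset 128 (the 64-bit zero_point is narrowed to
// int32_t as noted in the code).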

template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // Generate the binding id by shifting the tensor id left by 8 bits
    // and adding the subgraph id, which allows up to 256 subgraphs.
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}
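
// Illustrative example for GenerateLayerBindingId above: subgraphIndex = 1 and tensorIndex = 3
// give (3 << 8) + 1 = 769. The subgraph index occupies the low 8 bits, hence the 256 subgraph limit.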

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

} // <anonymous>

TfLiteParser::TfLiteParser()
: m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
}

void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}

void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subGraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subGraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subGraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned int> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward(reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
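
// Illustrative example for AddBroadcastReshapeLayer above (shapes chosen here, not taken from
// a model): for an ADD with input shapes [2, 3, 5] and [5], the lower-rank [5] input is padded
// with leading 1s to [1, 1, 5] and routed through the inserted Reshape layer into the operator's
// input slot 0, while the [2, 3, 5] input is registered directly as the consumer on input slot 1.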

INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubGraphPtr const & subgraph : m_Model->subgraphs)
    {
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                                          builtinCode %
                                          tflite::BuiltinOperator_MAX %
                                          subgraphIndex %
                                          operatorIndex %
                                          CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
                boost::format("Another layer has already registered itself as the producer of "
                              "subgraph:%1% tensor:%2% %3%") %
                              subgraphIndex %
                              tensorIndex %
                              CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    throw ParseException(
        boost::str(
            boost::format("Operator not supported. "
                          "subgraph:%1% operator:%2% "
                          "opcode_index:%3% opcode:%4% / %5% %6%") %
                          subgraphIndex %
                          operatorIndex %
                          opcodeIndex %
                          opcode %
                          tflite::EnumNameBuiltinOperator(opcode) %
                          CHECK_LOCATION().AsString()));
}

void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 biasTensorAndData.first,
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
    CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasTensorAndData.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}

void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}

void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();

    SoftmaxDescriptor desc;
    desc.m_Beta = options->beta;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
                                                     const armnn::TensorInfo & inputTensorInfo)
{
    CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
    std::vector<uint32_t> squeezeDims = squeezeDimsIn;
    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        std::stringstream ss;
        ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
           << " shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    if (squeezeDims.empty())
    {
        squeezeDims.assign(dimensionSequence,
                           dimensionSequence+inputTensorInfo.GetNumDimensions());
    }

    std::vector<uint32_t> outputDims;
    for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
    {
        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
        auto currentDimension = inputTensorInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    if (outputDims.size() > 4)
    {
        std::stringstream ss;
        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
           << " input shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    // we need to preserve the tensor type and the quantization data as well
    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);

    return outTensorInfo;
}
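
// Illustrative example for OutputShapeOfSqueeze above: squeezing dims {0, 2} of a tensor shaped
// [1, 2, 1, 3] drops both size-1 dimensions and yields [2, 3]. With an empty squeeze_dims list
// every size-1 dimension is removed, so [1, 2, 1, 3] also becomes [2, 3].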

void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo outputTensorInfo =
        TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
                                           inputTensorInfo);

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsAddOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsMulOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::MeanDescriptor desc;
    std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
    ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
    desc.m_Axis = axis;

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    desc.m_KeepDims =
        inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
            true : false;

    auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
    ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());

    size_t step = 2;
    armnn::PadDescriptor desc;
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::ReLu);
}

void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::BoundedReLu);
}

void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Sigmoid);
}

void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    boost::ignore_unused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = str(boost::format("Activation:"));
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
                                         " %2% ") % static_cast<int>(activationType) % CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
                                                     const std::vector<int32_t> & targetDimsIn)
{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                boost::str(
                    boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
        }

        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
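
// Illustrative example for OutputShapeOfReshape above: an input of 24 elements reshaped with
// new_shape = {2, -1, 4}: the product of the known dimensions is 8, so the -1 entry is stretched
// to 24 / 8 = 3 and the resulting shape is [2, 3, 4].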

void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);

    // Check for valid input size and that reshape parameters equal output shape
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

Sadik Armagan479045b2018-10-01 11:51:37 +01001323void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1324{
1325 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1326
1327 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1328 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1329
1330 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1331
1332 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1333 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1334 CHECK_VALID_SIZE(outputs.size(), 1);
1335
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001336 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1337 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001338
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001339 const unsigned int concatDimInput = static_cast<unsigned int>(
1340 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01001341
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001342 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1343 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001344
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001345 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001346
1347 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1348 {
1349 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1350
        // This sets up the concatDescriptor view origins
1352 armnnUtils::ProcessConcatInputTensorInfo(
1353 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001354 }
1355
1356 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
1357 IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());
1358
1359 BOOST_ASSERT(layer != nullptr);
1360
1361 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1362 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001363
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001364 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001365
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
Sadik Armagan479045b2018-10-01 11:51:37 +01001367
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001368 // add fused activation layer
1369 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001370
Sadik Armagan479045b2018-10-01 11:51:37 +01001371 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1372 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1373}
1374
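// Parses a TFLite FULLY_CONNECTED operator. The weights (and the optional bias) are taken as
// constant tensors, only two-dimensional weights are accepted, and the fused activation is
// appended as a separate activation layer.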
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001375void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1376{
1377 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1378
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsFullyConnectedOptions();
1381
1382 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1383
1384 FullyConnectedDescriptor desc;
1385 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001386 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001387
1388 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1389 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1390 CHECK_VALID_SIZE(outputs.size(), 1);
1391
1392 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1393
1394 // Fully Connected Layer accepts two dimensional weights input
1395 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1396 if (weightsDimension != 2)
1397 {
1398 throw ParseException(
1399 boost::str(
1400 boost::format(
1401 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1402 "Node %2%")
1403 % weightsDimension
1404 % CHECK_LOCATION().AsString()));
1405 }
1406
Matteo Martincigh747ef822018-12-18 09:26:39 +00001407 auto filterTensorAndData = CreateConstTensor(inputs[1],
1408 filterTensorInfo,
1409 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001410 armnn::IConnectableLayer* layer;
1411 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1412
1413 if (inputs.size() == 3)
1414 {
1415 desc.m_BiasEnabled = true;
1416 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001417 auto biasTensorAndData = CreateConstTensor(inputs[2],
1418 biasTensorInfo,
1419 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001420 layer = m_Network->AddFullyConnectedLayer(desc,
1421 filterTensorAndData.first,
1422 biasTensorAndData.first,
1423 layerName.c_str());
1424 }
1425 else
1426 {
1427 layer = m_Network->AddFullyConnectedLayer(desc,
1428 filterTensorAndData.first,
1429 layerName.c_str());
1430 }
1431 BOOST_ASSERT(layer != nullptr);
1432
1433 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1434 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1435
1436 // register the input connection slot for the layer
1437 // only the tensors for the inputs are relevant, exclude the const tensors
1438 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1439 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1440
    // Add the fused activation layer; the data layout does not matter for activations
1442 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1443 options->fused_activation_function);
1444 // register the output connection slots for the layer, connections are made after all layers have been created
1445 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1446 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1447}
1448
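// Parses the TFLite custom DetectionPostProcess operator. The descriptor fields are read from
// the flexbuffer-encoded custom options, the anchors input is treated as a constant tensor,
// and the layer's four outputs (typically detection boxes, classes, scores and the number of
// detections) are registered on the corresponding output slots.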
keidav011b3e2ea2019-02-21 10:07:37 +00001449void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1450{
1451 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1452
1453 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1454
1455 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1456 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1457 CHECK_VALID_SIZE(outputs.size(), 4);
1458
1459 // Obtain custom options from flexbuffers
1460 auto custom_options = operatorPtr->custom_options;
1461 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1462
1463 // Obtain descriptor information from tf lite
1464 DetectionPostProcessDescriptor desc;
1465 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1466 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1467 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1468 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1469 desc.m_NumClasses = m["num_classes"].AsUInt32();
1470 desc.m_ScaleH = m["h_scale"].AsFloat();
1471 desc.m_ScaleW = m["w_scale"].AsFloat();
1472 desc.m_ScaleX = m["x_scale"].AsFloat();
1473 desc.m_ScaleY = m["y_scale"].AsFloat();
1474
1475 if (!(m["use_regular_non_max_suppression"].IsNull()))
1476 {
1477 desc.m_UseRegularNms = m["use_regular_non_max_suppression"].AsBool();
1478 }
1479 if (!(m["detections_per_class"].IsNull()))
1480 {
1481 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1482 }
1483
1484 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1485 {
1486 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1487 "must be positive and less than or equal to 1.");
1488 }
1489
1490 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
1491 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
1492 armnn::Optional<armnn::PermutationVector&>());
1493
1494 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
1495 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
1496 layerName.c_str());
1497
1498 BOOST_ASSERT(layer != nullptr);
1499
1500 // Register outputs
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[i]);
        layer->GetOutputSlot(i).SetTensorInfo(outputTensorInfo);
1505 }
1506
1507 // Register the input connection slots for the layer, connections are made after all layers have been created
1508 // only the tensors for the inputs are relevant, exclude the const tensors
1509 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1510 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1511
1512 // Register the output connection slots for the layer, connections are made after all layers have been created
1513 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1514 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
1515 outputTensorIndexes[1],
1516 outputTensorIndexes[2],
1517 outputTensorIndexes[3]});
1518}
1519
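// Emulates TfLite's fused activations by appending a standalone activation layer to the given
// output slot of prevLayer. Returns prevLayer unchanged when the activation is NONE and throws
// for activation types that are not supported yet.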
Sadik Armagan58f39192018-09-17 14:14:39 +01001520armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
1521 unsigned int outputSlot,
1522 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01001523{
1524 ActivationDescriptor activationDesc;
1525 std::string layerName = prevLayer->GetName();
1526
1527 switch(activationType)
1528 {
1529 case tflite::ActivationFunctionType_NONE:
1530 {
1531 // this is a no-op: return previous layer
1532 return prevLayer;
1533 }
1534 case tflite::ActivationFunctionType_RELU:
1535 {
1536 activationDesc.m_Function = ActivationFunction::ReLu;
1537 layerName += ":RELU";
1538 break;
1539 }
1540 case tflite::ActivationFunctionType_RELU6:
1541 {
1542 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1543 activationDesc.m_A = 6.0f;
1544 activationDesc.m_B = 0.0f;
1545 layerName += ":RELU6";
1546 break;
1547 }
1548 case tflite::ActivationFunctionType_TANH:
1549 {
1550 activationDesc.m_Function = ActivationFunction::TanH;
1551 activationDesc.m_A = 1.0f;
1552 activationDesc.m_B = 1.0f;
1553 layerName += ":TANH";
1554 break;
1555 }
1556
        // Listed here only as a reminder of the other activation types we could support
1558 case tflite::ActivationFunctionType_RELU_N1_TO_1:
1559 case tflite::ActivationFunctionType_SIGN_BIT:
1560 default:
1561 {
1562 throw ParseException(
1563 boost::str(
                    boost::format("TfLite parser doesn't support fused activation: "
1565 "%1%/%2% %3% ") %
1566 activationType %
1567 tflite::EnumNameActivationFunctionType(activationType) %
1568 CHECK_LOCATION().AsString()));
1569
1570 }
1571 }
1572
1573 IConnectableLayer* activationLayer =
1574 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1575
1576 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
1577 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
1578 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
1579 return activationLayer;
1580}
1581
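// Reads the whole .tflite file into memory and hands it to LoadModelFromBinary.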
1582TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
1583{
1584 if (fileName == nullptr)
1585 {
1586 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
1587 CHECK_LOCATION().AsString()));
1588 }
1589 boost::system::error_code errorCode;
1590 boost::filesystem::path pathToFile(fileName);
1591 if (!boost::filesystem::exists(pathToFile, errorCode))
1592 {
1593 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
1594 fileName %
1595 errorCode %
1596 CHECK_LOCATION().AsString()));
1597 }
1598 std::ifstream file(fileName, std::ios::binary);
1599 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
1600 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
1601 fileContent.size());
1602}
1603
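// Verifies that the buffer holds a valid TfLite flatbuffer and unpacks it into the
// generated object API representation.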
1604TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
1605{
1606 if (binaryContent == nullptr)
1607 {
1608 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
1609 CHECK_LOCATION().AsString()));
1610 }
1611 flatbuffers::Verifier verifier(binaryContent, len);
1612 if (verifier.VerifyBuffer<tflite::Model>() == false)
1613 {
1614 throw ParseException(
1615 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
1616 "flatbuffers format. size:%1% %2%") %
1617 len %
1618 CHECK_LOCATION().AsString()));
1619 }
1620 return tflite::UnPackModel(binaryContent);
1621}
1622
1623TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
1624 size_t subgraphIndex,
1625 size_t operatorIndex)
1626{
1627 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1628
1629 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1630 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1631
1632 size_t inputCount = operatorPtr->inputs.size();
1633 TensorRawPtrVector result(inputCount);
1634 for (size_t i=0; i<inputCount; ++i)
1635 {
1636 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
1637 result[i] = subGraphPtr->tensors[inputId].get();
1638 }
1639 return result;
1640}
1641
1642TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
1643 size_t subgraphIndex,
1644 size_t operatorIndex)
1645{
1646 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1647
1648 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1649 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1650
1651 size_t outputCount = operatorPtr->outputs.size();
1652 TensorRawPtrVector result(outputCount);
1653 for (size_t i=0; i<outputCount; ++i)
1654 {
1655 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
1656 CHECK_TENSOR(model, subgraphIndex, outputId);
1657 result[i] = subGraphPtr->tensors[outputId].get();
1658 }
1659 return result;
1660}
1661
1662TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
1663 size_t subgraphIndex)
1664{
1665 CHECK_SUBGRAPH(model, subgraphIndex);
1666 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1667
1668 size_t inputCount = subGraphPtr->inputs.size();
1669 TensorIdRawPtrVector result(inputCount);
1670 for (size_t i=0; i<inputCount; ++i)
1671 {
1672 uint32_t inputId = CHECKED_NON_NEGATIVE(subGraphPtr->inputs[i]);
1673 CHECK_TENSOR(model, subgraphIndex, inputId);
1674 result[i] = std::make_pair(inputId, subGraphPtr->tensors[inputId].get());
1675 }
1676 return result;
1677}
1678
1679TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
1680 size_t subgraphIndex)
1681{
1682 CHECK_SUBGRAPH(model, subgraphIndex);
1683 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1684
1685 size_t outputCount = subGraphPtr->outputs.size();
1686 TensorIdRawPtrVector result(outputCount);
1687 for (size_t i=0; i<outputCount; ++i)
1688 {
1689 uint32_t outputId = CHECKED_NON_NEGATIVE(subGraphPtr->outputs[i]);
1690 result[i] = std::make_pair(outputId, subGraphPtr->tensors[outputId].get());
1691 }
1692 return result;
1693}
1694
1695std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
1696 size_t subgraphIndex,
1697 size_t operatorIndex)
1698{
1699 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1700 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1701 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1702 return operatorPtr->inputs;
1703}
1704
1705std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
1706 size_t subgraphIndex,
1707 size_t operatorIndex)
1708{
1709 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1710 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1711 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1712 return operatorPtr->outputs;
1713}
1714
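// Records, for each of the layer's input slots, which tensor index it consumes. The actual
// connections are made later, once every producer has been registered.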
1715void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
1716 size_t operatorIndex,
1717 IConnectableLayer* layer,
1718 const std::vector<unsigned int>& tensorIndexes)
1719{
1720 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1721 BOOST_ASSERT(layer != nullptr);
1722 if (tensorIndexes.size() != layer->GetNumInputSlots())
1723 {
1724 throw ParseException(
1725 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
1726 " for subgraph:%3% operator index:%4% %5%") %
1727 tensorIndexes.size() %
1728 layer->GetNumInputSlots() %
1729 subgraphIndex %
1730 operatorIndex %
1731 CHECK_LOCATION().AsString()));
1732 }
1733
1734 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
1735 {
1736 unsigned int tensorIndex = tensorIndexes[slotIndex];
1737 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
1738 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
1739 }
1740}
1741
1742void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
1743 size_t operatorIndex,
1744 IConnectableLayer* layer,
1745 const std::vector<unsigned int>& tensorIndexes)
1746{
1747 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1748 BOOST_ASSERT(layer != nullptr);
1749 if (tensorIndexes.size() != layer->GetNumOutputSlots())
1750 {
1751 throw ParseException(
1752 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
1753 " for subgraph:%3% operator index:%4% %5%") %
1754 tensorIndexes.size() %
1755 layer->GetNumOutputSlots() %
1756 subgraphIndex %
1757 operatorIndex %
1758 CHECK_LOCATION().AsString()));
1759 }
1760
1761 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1762 {
1763 unsigned int tensorIndex = tensorIndexes[slotIndex];
1764 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
1765 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
1766 }
1767}
1768
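// Adds an ArmNN input layer for every subgraph input tensor and registers it as the
// producer of that tensor.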
1769void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
1770{
1771 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1772
1773 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
1774 for (auto const & tensorIdAndPtr : inputs)
1775 {
1776 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1777 IConnectableLayer* layer =
1778 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1779
1780 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
1781 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1782
1783 RegisterOutputSlots(subgraphIndex,
1784 VIRTUAL_OPERATOR_ID,
1785 layer,
1786 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1787 }
1788}
1789
1790void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
1791{
1792 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1793
1794 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
1795 for (auto const & tensorIdAndPtr : outputs)
1796 {
1797 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1798 IConnectableLayer* layer =
1799 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1800
1801 RegisterInputSlots(subgraphIndex,
1802 VIRTUAL_OPERATOR_ID,
1803 layer,
1804 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1805 }
1806}
1807
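// Adds constant layers for tensors that have consumers but no producing layer, i.e. tensors
// whose data comes from the model's constant buffers.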
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02001808void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
1809{
1810 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1811
    const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
    for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
    {
        if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
            m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
        {
            TensorRawPtr tensorPtr = subGraphPtr->tensors[tensorIndex].get();
            armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
            auto tensorAndData = CreateConstTensor(tensorPtr,
                                                   tensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());

            std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
            IConnectableLayer *layer =
                m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

            layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
            RegisterOutputSlots(subgraphIndex,
                                VIRTUAL_OPERATOR_ID,
                                layer,
                                { tensorIndex });
        }
    }
}
1840
telsoa01c577f2c2018-08-31 09:22:23 +01001841// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
1842TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
1843{
1844 CHECK_BUFFER(model, bufferIndex);
1845 return model->buffers[bufferIndex].get();
1846}
1847
Matteo Martincigh747ef822018-12-18 09:26:39 +00001848template<typename T>
1849std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
1850TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
1851 TfLiteParser::TensorRawPtr tensorPtr,
1852 armnn::TensorInfo& tensorInfo,
1853 armnn::Optional<armnn::PermutationVector&> permutationVector)
1854{
1855 auto constData = CreateConstTensorImpl<T>(bufferPtr,
1856 tensorPtr,
1857 tensorInfo,
1858 permutationVector);
1859 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
1860 return std::make_pair(constData.first, std::move(storage));
1861}
1862
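// Wraps a tensor's buffer data as an armnn::ConstTensor. The (optionally permuted) copy of
// the data is kept alive by the returned SupportedDataStorage.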
telsoa01c577f2c2018-08-31 09:22:23 +01001863std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
1864TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00001865 armnn::TensorInfo& tensorInfo,
1866 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01001867{
1868 CHECK_TENSOR_PTR(tensorPtr);
1869 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
1870 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
1871
1872 switch (tensorInfo.GetDataType())
1873 {
1874 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001875 return CreateConstTensorAndStoreData<float>(bufferPtr,
1876 tensorPtr,
1877 tensorInfo,
1878 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001879 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001880 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
1881 tensorPtr,
1882 tensorInfo,
1883 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001884 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001885 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
1886 tensorPtr,
1887 tensorInfo,
1888 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001889 default:
1890 {
1891 std::stringstream errString;
1892 errString << "Unexpected datatype when creating const tensor: "
1893 << armnn::GetDataTypeName(tensorInfo.GetDataType())
1894 << " shape:" << tensorInfo.GetShape()
1895 << CHECK_LOCATION().AsString();
1896 throw ParseException(errString.str());
1897 }
1898 }
1899}
1900
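// Looks up an input binding (binding id plus tensor info) by tensor name within a subgraph.
// A minimal usage sketch (the subgraph id 0 and the tensor name "input" are illustrative):
//   BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, "input");
//   armnn::TensorInfo inputInfo   = inputBinding.second;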
1901BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
1902 const std::string& name) const
1903{
1904 CHECK_SUBGRAPH(m_Model, subgraphId);
1905 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
1906 for (auto const & input : inputs)
1907 {
1908 if (input.second->name == name)
1909 {
1910 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
1911 return std::make_pair(bindingId, ToTensorInfo(input.second));
1912 }
1913 }
1914
1915 std::stringstream bindings;
1916 for (auto const & input : inputs)
1917 {
1918 bindings << "'" << input.second->name << "' ";
1919 }
1920
1921 throw ParseException(
1922 boost::str(
1923 boost::format("No input binding found for subgraph:%1% and name:%2%. "
1924 "Possible inputs are: [%3%] %4%") %
1925 subgraphId %
1926 name %
1927 bindings.str() %
1928 CHECK_LOCATION().AsString()));
1929}
1930
1931BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
1932 const std::string& name) const
1933{
1934 CHECK_SUBGRAPH(m_Model, subgraphId);
1935 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
1936 for (auto const & output : outputs)
1937 {
1938 if (output.second->name == name)
1939 {
1940 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
1941 return std::make_pair(bindingId, ToTensorInfo(output.second));
1942 }
1943 }
1944
1945 std::stringstream bindings;
1946 for (auto const & output : outputs)
1947 {
1948 bindings << "'" << output.second->name << "' ";
1949 }
1950
1951 throw ParseException(
1952 boost::str(
1953 boost::format("No output binding found for subgraph:%1% and name:%2%. "
1954 "Possible outputs are: [%3%] %4%") %
1955 subgraphId %
1956 name %
1957 bindings.str() %
1958 CHECK_LOCATION().AsString()));
1959}
1960
1961size_t TfLiteParser::GetSubgraphCount() const
1962{
1963 return m_Model->subgraphs.size();
1964}
1965
1966std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
1967{
1968 CHECK_SUBGRAPH(m_Model, subgraphId);
1969 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
1970 std::vector<std::string> result;
1971 result.reserve(inputs.size());
1972 for (auto const & input : inputs)
1973 {
1974 result.push_back(input.second->name);
1975 }
1976 return result;
1977}
1978
1979std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
1980{
1981 CHECK_SUBGRAPH(m_Model, subgraphId);
1982 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
1983 std::vector<std::string> result;
1984 result.reserve(outputs.size());
1985 for (auto const & output : outputs)
1986 {
1987 result.push_back(output.second->name);
1988 }
1989 return result;
1990}
1991
1992ITfLiteParser* ITfLiteParser::CreateRaw()
1993{
1994 return new TfLiteParser();
1995}
1996
1997ITfLiteParserPtr ITfLiteParser::Create()
1998{
1999 return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
2000}
2001
2002void ITfLiteParser::Destroy(ITfLiteParser* parser)
2003{
2004 delete parser;
2005}
2006
2007TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
2008: m_FloatData(std::move(data))
2009, m_Uint8Data(nullptr)
2010, m_Int32Data(nullptr)
2011{
2012}
2013
2014TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2015: m_FloatData(nullptr)
2016, m_Uint8Data(std::move(data))
2017, m_Int32Data(nullptr)
2018{
2019}
2020
2021TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2022: m_FloatData(nullptr)
2023, m_Uint8Data(nullptr)
2024, m_Int32Data(std::move(data))
2025{
2026}
2027
2028} // armnnTfLiteParser