blob: 359695b94d49e341306119c5324ab9f6f22d0948 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
#include "TfLiteParser.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>

// armnnUtils:
#include <ParserHelper.hpp>
#include <Permute.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>

#include <algorithm>
#include <fstream>
#include <limits>
#include <numeric>
#include <utility>
telsoa01c577f2c2018-08-31 09:22:23 +010029
30using namespace armnn;
31using armnn::CheckLocation;
32namespace armnnTfLiteParser
33{
34namespace
35{
// Permutation vectors used by the swizzle/deswizzle helpers below to convert
// tensors between the NHWC layout used by TfLite and the layout expected by
// the ArmNN permute layers (see armnnUtils::Permuted usage in SwizzleIn/DeswizzleOut).
const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
38
jimfly01c25411c2018-11-14 17:47:22 +000039IConnectableLayer* SwizzleIn(INetwork& network,
40 IConnectableLayer* layer,
41 unsigned int inputSlotIndex,
42 const TensorInfo & inputInfo)
43{
44 BOOST_ASSERT(layer != nullptr);
45 // Add swizzle layer
46 std::stringstream name;
47 name << "swizzle_for-" << layer->GetName() << ":in" << inputSlotIndex;
48 IConnectableLayer* const swizzleLayer = network.AddPermuteLayer(NHWCToArmNN, name.str().c_str());
49 // Set swizzled output shape
50 const TensorInfo swizzleOutInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
51 swizzleLayer->GetOutputSlot(0).SetTensorInfo(swizzleOutInfo);
52 // Connect the swizzle layer to the actual layer
53 swizzleLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(inputSlotIndex));
54
55 return swizzleLayer;
56}
57
58IConnectableLayer* DeswizzleOut(INetwork& network,
59 IConnectableLayer* layer,
60 unsigned int outputSlotIndex,
61 const TensorInfo & outputInfo)
62{
63 BOOST_ASSERT(layer != nullptr);
64 // Add deswizzle layer
65 std::stringstream name;
66 name << "deswizzle_for-" << layer->GetName() << ":out" << outputSlotIndex;
67 IConnectableLayer* const deswizzleLayer = network.AddPermuteLayer(ArmNNToNHWC, name.str().c_str());
68 // Set deswizzled output shape
69 deswizzleLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
70 // Set original layer output shape
71 const TensorInfo deswizzleOutInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
72 layer->GetOutputSlot(outputSlotIndex).SetTensorInfo(deswizzleOutInfo);
73 // Connect the actual layer to the deswizzle layer
74 layer->GetOutputSlot(outputSlotIndex).Connect(deswizzleLayer->GetInputSlot(0));
75
76 return deswizzleLayer;
77}
78
telsoa01c577f2c2018-08-31 09:22:23 +010079const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
80
81void CheckSubgraph(const TfLiteParser::ModelPtr & model,
82 size_t subgraphIndex,
83 const CheckLocation & location)
84{
85 if (model.get() == nullptr)
86 {
87 throw ParseException(
88 boost::str(
89 boost::format("%1% was called with invalid (null) model. "
90 "Possible reason is that the model is not yet loaded and Unpack(ed). "
91 "subgraph:%2% at %3%") %
92 location.m_Function %
93 subgraphIndex %
94 location.FileLine()));
95 }
96 else if (subgraphIndex >= model->subgraphs.size())
97 {
98 throw ParseException(
99 boost::str(
100 boost::format("%1% was called with an invalid subgraph index. "
101 "subgraph:%2% at %3%") %
102 location.m_Function %
103 subgraphIndex %
104 location.FileLine()));
105 }
106}
107
108#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
109 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
110
111void CheckModel(const TfLiteParser::ModelPtr & model,
112 size_t subgraphIndex,
113 size_t operatorIndex,
114 const CheckLocation & location)
115{
116 if (model.get() == nullptr)
117 {
118 throw ParseException(
119 boost::str(
120 boost::format("%1% was called with invalid (null) model. "
121 "Possible reason is that the model is not yet loaded and Unpack(ed). "
122 "subgraph:%2% operator:%3% at %4%") %
123 location.m_Function %
124 subgraphIndex %
125 operatorIndex %
126 location.FileLine()));
127 }
128 else if (subgraphIndex >= model->subgraphs.size())
129 {
130 throw ParseException(
131 boost::str(
132 boost::format("%1% was called with an invalid subgraph index. "
133 "subgraph:%2% operator:%3% at %4%") %
134 location.m_Function %
135 subgraphIndex %
136 operatorIndex %
137 location.FileLine()));
138 }
139 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
140 operatorIndex != VIRTUAL_OPERATOR_ID)
141 {
142 throw ParseException(
143 boost::str(
144 boost::format("%1% was called with an invalid operator index. "
145 "subgraph:%2% operator:%3% at %4%") %
146 location.m_Function %
147 subgraphIndex %
148 operatorIndex %
149 location.FileLine()));
150 }
151}
152
153#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
154 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
155
156void CheckTensor(const TfLiteParser::ModelPtr & model,
157 size_t subgraphIndex,
158 size_t tensorIndex,
159 const CheckLocation & location)
160{
161 // not checking model, because I assume CHECK_MODEL already run
162 // and checked that. An assert would do.
163 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
164
165 // also subgraph index should be checked by CHECK_MODEL so
166 // I only add an assert here
167 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
168
169 // the tensor index is the only one to check here
170 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
171 {
172 throw ParseException(
173 boost::str(
174 boost::format("%1% was called with an invalid tensor index. "
175 "subgraph:%2% tensor:%3% at %4%") %
176 location.m_Function %
177 subgraphIndex %
178 tensorIndex %
179 location.FileLine()));
180 }
181}
182
183#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
184 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
185
186void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
187 const CheckLocation & location)
188{
189 if (rawPtr == nullptr)
190 {
191 throw ParseException(
192 boost::str(
193 boost::format("%1% was called with a null tensor pointer. "
194 "at %2%") %
195 location.m_Function %
196 location.FileLine()));
197
198 }
199}
200
201#define CHECK_TENSOR_PTR(TENSOR_PTR) \
202 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
203
204void CheckBuffer(const TfLiteParser::ModelPtr & model,
205 size_t bufferIndex,
206 const CheckLocation & location)
207{
208 if (model.get() == nullptr)
209 {
210 throw ParseException(
211 boost::str(
212 boost::format("%1% was called with invalid (null) model. "
213 "Possible reason is that the model is not yet loaded and Unpack(ed). "
214 "buffer:%2% at %3%") %
215 location.m_Function %
216 bufferIndex %
217 location.FileLine()));
218 }
219 else if (bufferIndex >= model->buffers.size())
220 {
221 throw ParseException(
222 boost::str(
223 boost::format("%1% was called with an invalid buffer index. "
224 "buffer index:%2% at %3%") %
225 location.m_Function %
226 bufferIndex %
227 location.FileLine()));
228 }
229 else if (model->buffers[bufferIndex].get() == nullptr)
230 {
231 throw ParseException(
232 boost::str(
233 boost::format("The buffer #%1% is null. %3%") %
234 bufferIndex %
235 location.AsString()));
236 }
237}
238
239#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
240 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
241
242void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
243 const armnn::TensorInfo & tensorInfo,
244 uint32_t bufferId,
245 const CheckLocation & location)
246{
247 if (bufferPtr == nullptr)
248 {
249 throw ParseException(
250 boost::str(
251 boost::format("BufferPtr is null for buffer:%1%. %2%") %
252 bufferId %
253 location.AsString()));
254 }
255 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
256 tensorInfo.GetNumBytes() > bufferPtr->data.size())
257 {
258 std::stringstream ss;
259 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
260 << "For tensor: " << tensorInfo.GetShape()
261 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
262 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
263 throw ParseException(ss.str());
264 }
265}
266
267#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
268 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
269
270bool IsActivationSupported(tflite::ActivationFunctionType activationType)
271{
272 switch(activationType)
273 {
274 case tflite::ActivationFunctionType_NONE:
275 case tflite::ActivationFunctionType_RELU:
276 case tflite::ActivationFunctionType_RELU6:
277 case tflite::ActivationFunctionType_TANH:
278 {
279 return true;
280 }
281 default:
282 {
283 return false;
284 }
285 }
286}
287
288#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
289 do { \
290 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
291 { \
292 throw ParseException( \
293 boost::str( \
294 boost::format("TfLite parser doesn't suppport fused activation: " \
295 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
296 OPTION->fused_activation_function % \
297 tflite::EnumNameActivationFunctionType(\
298 OPTION->fused_activation_function) % \
299 __func__ % \
300 SUBGRAPH_INDEX % \
301 OPERATOR_INDEX % \
302 CHECK_LOCATION().FileLine())); \
303 } \
304 } while(false)
305
306
307std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
308{
309 std::vector<unsigned int> result;
310 result.reserve(in.size());
311 for (auto & i : in)
312 {
313 result.push_back(CHECKED_NON_NEGATIVE(i));
314 }
315 return result;
316}
317
318void CalcPadding(uint32_t inputSize,
319 uint32_t filterSize,
320 uint32_t stride,
321 uint32_t& paddingFront,
322 uint32_t& paddingBack,
323 tflite::Padding padding)
324{
325 paddingFront = 0;
326 paddingBack = 0;
327 if (padding == tflite::Padding_SAME)
328 {
329 uint32_t outputSize = (inputSize + stride - 1) / stride;
330 uint32_t temp = (outputSize - 1) * stride + filterSize;
331 if (temp > inputSize)
332 {
333 paddingFront = (temp - inputSize) / 2;
334 paddingBack = (temp - inputSize) - paddingFront;
335 }
336 }
337}
338
339armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
340{
341 armnn::DataType type;
342 CHECK_TENSOR_PTR(tensorPtr);
343
344 switch (tensorPtr->type)
345 {
346 case tflite::TensorType_UINT8:
347 type = armnn::DataType::QuantisedAsymm8;
348 break;
349 case tflite::TensorType_FLOAT32:
350 type = armnn::DataType::Float32;
351 break;
352 case tflite::TensorType_INT32:
353 type = armnn::DataType::Signed32;
354 break;
355
356 default:
357 {
358 CheckLocation location = CHECK_LOCATION();
359 throw ParseException(
360 boost::str(
361 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
362 tensorPtr->type %
363 tflite::EnumNameTensorType(tensorPtr->type) %
364 tensorPtr->name %
365 location.AsString()));
366 }
367 }
368
369 float quantizationScale = 0.0f;
370 int32_t quantizationOffset = 0;
371
372 if (tensorPtr->quantization.get())
373 {
374 CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
375 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
376
377 if (tensorPtr->quantization->scale.size() == 1)
378 {
379 quantizationScale = tensorPtr->quantization->scale[0];
380 }
381 if (tensorPtr->quantization->zero_point.size() == 1)
382 {
383 // NOTE: we lose precision here when converting from 64 bit to 32
384 // but this is what we support at the monent in ArmNN
385 quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
386 }
387 }
388
389 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
390
391 // two statements (on purpose) for easier debugging:
392 armnn::TensorInfo result(static_cast<unsigned int>(tensorPtr->shape.size()),
393 dimensions.data(),
394 type,
395 quantizationScale,
396 quantizationOffset);
397 return result;
398}
399
400template<typename T>
401std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
402CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
403 TfLiteParser::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000404 armnn::TensorInfo& tensorInfo,
405 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100406{
407 BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
408 BOOST_ASSERT_MSG(bufferPtr != nullptr,
409 boost::str(
410 boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
411
412 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000413
414 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
415 {
416 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000417 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
418 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000419 }
420 else
421 {
422 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
423 }
424
telsoa01c577f2c2018-08-31 09:22:23 +0100425 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
426}
427
telsoa01c577f2c2018-08-31 09:22:23 +0100428armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
429{
430 // generate the binding id by shifting the tensor id by 8 bit
431 // and add the subgraph id, which allows 256 subgraphs
432 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
433}
434
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000435bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
436{
437 const unsigned int actualSize = actual.GetNumDimensions();
438 if (actualSize != expected.size())
439 {
440 return false;
441 }
442
443 for (unsigned int i = 0u; i < actualSize; i++)
444 {
445 if (expected[i] < 0 ||
446 actual[i] != static_cast<unsigned int>(expected[i]))
447 {
448 return false;
449 }
450 }
451
452 return true;
453}
454
telsoa01c577f2c2018-08-31 09:22:23 +0100455} // <anonymous>
456
457TfLiteParser::TfLiteParser()
458: m_Network(nullptr, nullptr)
459, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
460{
461 // register supported operators
462 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
Sadik Armagan479045b2018-10-01 11:51:37 +0100463 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
telsoa01c577f2c2018-08-31 09:22:23 +0100464 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
465 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
Sadik Armagan8853c1f2018-10-22 09:04:18 +0100466 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
Finn Williamsc42c3842019-01-22 14:18:11 +0000467 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100468 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
Sadik Armagan58f39192018-09-17 14:14:39 +0100469 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
470 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
Sadikb94967b2018-09-19 15:30:00 +0100471 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
Sadik Armagan479045b2018-10-01 11:51:37 +0100472 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
473 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -0200474 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
Bruno Goncalvesf803f782018-12-18 13:40:30 -0200475 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
Bruno Goncalves2235cee2018-12-19 12:51:45 -0200476 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
Bruno Goncalves6c2355b2018-12-19 12:52:01 -0200477 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
telsoa01c577f2c2018-08-31 09:22:23 +0100478}
479
480void TfLiteParser::ResetParser()
481{
482 m_Network = armnn::INetworkPtr(nullptr, nullptr);
483 m_Model = nullptr;
484 m_SubgraphConnections.clear();
485}
486
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200487void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
488 size_t operatorIndex,
489 IConnectableLayer *layer)
490{
491 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
492 BOOST_ASSERT(layer != nullptr);
493
494 const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
495 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
496
497 BOOST_ASSERT(operatorPtr->inputs.size() > 1);
498
499 uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
500 TensorRawPtr tensorPtr = subGraphPtr->tensors[reshapedInputId].get();
501 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
502 TensorRawPtr tensorPtr1 = subGraphPtr->tensors[inputId].get();
503
504 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
505 armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);
506
507 if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
508 {
509 uint32_t id = reshapedInputId;
510 reshapedInputId = inputId;
511 inputId = id;
512
513 reshapedTensorInfo = ToTensorInfo(tensorPtr1);
514 inputTensorInfo = ToTensorInfo(tensorPtr);
515 }
516
517 uint32_t numDimensions = inputTensorInfo.GetNumDimensions();
518
519 std::vector<unsigned> reshapedDim;
520 for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
521 {
522 reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
523 }
524
525 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
526 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
527
528 reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
529
530 std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
531 armnn::ReshapeDescriptor desc;
532 desc.m_TargetShape = reshapedTensorInfo.GetShape();
533 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
534
535 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
536 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
537
538 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
539
540 armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
541 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
542}
543
// Loads a TfLite flatbuffer model from the given file path and converts it to
// an armnn::INetwork. Resets any previously loaded state first.
INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}
550
// Converts an in-memory TfLite flatbuffer (raw bytes) to an armnn::INetwork.
// Resets any previously loaded state first.
INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
557
558INetworkPtr TfLiteParser::CreateNetworkFromModel()
559{
560 m_Network = INetwork::Create();
561 BOOST_ASSERT(m_Model.get() != nullptr);
562
563 bool failedToCreate = false;
564 std::stringstream errors;
565
566 if (m_Model->subgraphs.size() != 1)
567 {
568 throw ParseException(
569 boost::str(
570 boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
571 m_Model->subgraphs.size() %
572 CHECK_LOCATION().AsString()));
573 }
574
575 size_t subgraphIndex = 0;
576 for (SubGraphPtr const & subgraph : m_Model->subgraphs)
577 {
578 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
579
580 size_t operatorIndex = 0;
581 for (OperatorPtr const & op : subgraph->operators)
582 {
583 try
584 {
585 if (op->custom_options.size() > 0)
586 {
587 throw ParseException(
588 boost::str(
589 boost::format("Custom options for op: %1% is not supported. "
590 "It has %2% bytes of custom options. %3%") %
591 op->opcode_index %
592 op->custom_options.size() %
593 CHECK_LOCATION().AsString()));
594 }
595
596 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
597 auto builtinCode = opCodePtr->builtin_code;
598
599 if (builtinCode > tflite::BuiltinOperator_MAX)
600 {
601 throw ParseException(
602 boost::str(
603 boost::format("Operator code %1% is out of range 0-%2%. "
604 "subgraph:%3% operator idx:%4%. %5%") %
605 builtinCode %
606 tflite::BuiltinOperator_MAX %
607 subgraphIndex %
608 operatorIndex %
609 CHECK_LOCATION().AsString()));
610 }
611
612 // lookup and call the parser function
613 auto & parserFunction = m_ParserFunctions[builtinCode];
614 (this->*parserFunction)(subgraphIndex, operatorIndex);
615 }
616 catch (const ParseException& e)
617 {
618 failedToCreate = true;
619 std::stringstream errorString;
620
621 errorString << "Failed to parse operator #" << operatorIndex
622 << " within subgraph #" << subgraphIndex
623 << " error: " << e.what();
624 BOOST_LOG_TRIVIAL(error) << errorString.str();
625
626 errors << errorString.str() << "\n";
627 }
628 ++operatorIndex;
629 }
630
631 SetupInputLayers(subgraphIndex);
632 SetupOutputLayers(subgraphIndex);
633
634 ++subgraphIndex;
635 }
636
637 if (failedToCreate)
638 {
639 // we can skip everything and let the outer exception handler deal with the error
640 throw ParseException(errors.str());
641 }
642
643 // establish the connections from the layer outputs to the inputs of the subsequent layers
644 for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
645 {
646 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
647 {
648 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
649 {
650 for (size_t inputSlotIdx = 0;
651 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
652 ++inputSlotIdx)
653 {
654 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
655 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
656 }
657 }
658 }
659 }
660
661 return std::move(m_Network);
662}
663
664void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
665 size_t tensorIndex,
666 armnn::IOutputSlot* slot)
667{
668 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
669 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
670 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
671
672 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
673
674 // assuming there is only one producer for that tensor
675 if (tensorSlots.outputSlot != nullptr)
676 {
677 throw ParseException(boost::str(
678 boost::format("Another layer has already registered itself as the producer of "
679 "subgraph:%1% tensor:%2% %3%") %
680 subgraphIndex %
681 tensorIndex %
682 CHECK_LOCATION().AsString()));
683 }
684
685 tensorSlots.outputSlot = slot;
686}
687
688void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
689 size_t tensorIndex,
690 armnn::IInputSlot* slot)
691{
692 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
693 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
694 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
695
696 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
697 tensorSlots.inputSlots.push_back(slot);
698}
699
700void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
701{
702 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
703 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
704 //
705 auto opcodeIndex = operatorPtr->opcode_index;
706 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
707
708 throw ParseException(
709 boost::str(
710 boost::format("Operator not supported. "
711 "subgraph:%1% operator:%2% "
712 "opcode_index:%3% opcode:%4% / %5% %6%") %
713 subgraphIndex %
714 operatorIndex %
715 opcodeIndex %
716 opcode %
717 tflite::EnumNameBuiltinOperator(opcode) %
718 CHECK_LOCATION().AsString()));
719}
720
// Parses a CONV_2D operator into an armnn Convolution2d layer (NHWC layout).
// Inputs: [0] activation tensor, [1] filter weights, optional [2] bias.
// A supported fused activation (NONE/RELU/RELU6/TANH) is appended as a
// separate activation layer after the convolution.
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    // Reject fused activations the parser cannot lower.
    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3); // 2 = no bias, 3 = with bias

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Derive explicit padding from the TfLite SAME/VALID padding scheme.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // No permutation needed for Conv2D weights (empty Optional).
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 biasTensorAndData.first,
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Append the fused activation (if any); outputs are registered against it
    // so downstream layers connect after the activation.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
798
// Parses a DEPTHWISE_CONV_2D operator into an armnn DepthwiseConvolution2d
// layer (NHWC layout). TfLite stores the weights as [1, H, W, I*M]; they are
// reshaped to [H, W, I, M] and permuted to the [M, I, H, W] order ArmNN
// expects. Only depth_multiplier == 1 is accepted.
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    // Reject fused activations the parser cannot lower.
    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
    CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3); // 2 = no bias, 3 = with bias
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // Derive explicit padding from the TfLite SAME/VALID padding scheme.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // The permutation vector makes CreateConstTensor rearrange the weight data.
    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasTensorAndData.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Append the fused activation (if any); outputs are registered against it
    // so downstream layers connect after the activation.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
881
// Parses a TfLite AVERAGE_POOL_2D operator by delegating to the common pooling handler.
void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
886
// Parses a TfLite MAX_POOL_2D operator by delegating to the common pooling handler.
void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
891
// Common handler for the TfLite pooling operators (AVERAGE_POOL_2D, MAX_POOL_2D).
// Builds a Pooling2dDescriptor from the operator's Pool2DOptions, computes the
// implicit SAME/VALID padding from the input dimensions, adds the ArmNN
// Pooling2d layer (NHWC), and appends any fused activation.
void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    // The layer name encodes the concrete pooling flavour plus its location in the model.
    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    // Exclude padding values from average computations, matching TfLite semantics.
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // CalcPadding fills in desc.m_Pad* from the TfLite SAME/VALID padding mode.
    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // After this point 'layer' is the fused activation layer (or the pooling layer
    // itself when the fused activation is NONE), so outputs register against it.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
961
telsoa01c577f2c2018-08-31 09:22:23 +0100962void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
963{
964 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
965 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
966 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
967
968 SoftmaxDescriptor desc;
969 desc.m_Beta = options->beta;
970
971 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
972 CHECK_VALID_SIZE(inputs.size(), 1);
973 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
974 CHECK_VALID_SIZE(outputs.size(), 1);
975
976 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
977 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
978
979 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
980 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
981
982 // register the input connection slots for the layer, connections are made after all layers have been created
983 // only the tensors for the inputs are relevant, exclude the const tensors
984 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
985 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
986
987 // register the output connection slots for the layer, connections are made after all layers have been created
988 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
989 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
990}
991
992armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
993 const armnn::TensorInfo & inputTensorInfo)
994{
995 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
996 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
997 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
998
999 if (inputTensorInfo.GetNumDimensions() > 4)
1000 {
1001 std::stringstream ss;
1002 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1003 << " shape:" << inputTensorInfo.GetShape() << " "
1004 << CHECK_LOCATION().AsString();
1005 throw ParseException(ss.str());
1006 }
1007
1008 if (squeezeDims.empty())
1009 {
1010 squeezeDims.assign(dimensionSequence,
1011 dimensionSequence+inputTensorInfo.GetNumDimensions());
1012 }
1013
1014 std::vector<uint32_t> outputDims;
1015 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1016 {
1017 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1018 auto currentDimension = inputTensorInfo.GetShape()[i];
1019 if (skipSqueeze || currentDimension != 1)
1020 {
1021 outputDims.push_back(currentDimension);
1022 }
1023 }
1024
1025 if (outputDims.size() > 4)
1026 {
1027 std::stringstream ss;
1028 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1029 << " shape:" << inputTensorInfo.GetShape() << " "
1030 << CHECK_LOCATION().AsString();
1031 throw ParseException(ss.str());
1032 }
1033
1034 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1035 outputDims.data());
1036
1037 // we need to preserve the tensor type and the quantization data as well
1038 TensorInfo outTensorInfo = inputTensorInfo;
1039 outTensorInfo.SetShape(outShape);
1040
1041 return outTensorInfo;
1042}
1043
1044void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1045{
1046 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1047
1048 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1049 CHECK_VALID_SIZE(inputs.size(), 1);
1050
1051 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1052 CHECK_VALID_SIZE(outputs.size(), 1);
1053
1054 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1055 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1056
1057 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1058 armnn::TensorInfo outputTensorInfo =
1059 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1060 inputTensorInfo);
1061
1062 ReshapeDescriptor reshapeDesc;
1063 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1064
1065 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1066 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1067 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1068
1069 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1070 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1071
1072 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1073 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1074}
1075
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001076void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1077{
1078 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1079
1080 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1081 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1082
1083 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1084 CHECK_VALID_SIZE(inputs.size(), 2);
1085
1086 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1087 CHECK_VALID_SIZE(outputs.size(), 1);
1088
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001089 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1090 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1091
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001092 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1093 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1094
1095 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1096 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1097
1098 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001099 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1100 {
1101 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1102 }
1103 else
1104 {
1105 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1106 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001107
1108 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1109
1110 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1111 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1112}
1113
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001114void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1115{
1116 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1117
1118 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1119 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1120
1121 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1122 CHECK_VALID_SIZE(inputs.size(), 2);
1123
1124 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1125 CHECK_VALID_SIZE(outputs.size(), 1);
1126
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001127 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1128 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1129
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001130 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1131 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1132
1133 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1134 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1135
1136 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001137 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1138 {
1139 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1140 }
1141 else
1142 {
1143 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1144 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001145
1146 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1147
1148 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1149 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1150}
1151
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001152void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1153{
1154 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1155
1156 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1157
1158 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1159 CHECK_VALID_SIZE(outputs.size(), 1);
1160
1161 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1162 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1163
1164 armnn::MeanDescriptor desc;
1165 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1166 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1167 desc.m_Axis = axis;
1168
1169 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1170 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1171
1172 desc.m_KeepDims =
1173 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1174 true : false;
1175
1176 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1177 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1178
1179 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1180
1181 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1182 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1183
1184 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1185 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1186}
1187
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001188void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1189{
1190 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1191
1192 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1193
1194 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1195 CHECK_VALID_SIZE(outputs.size(), 1);
1196
1197 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1198 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1199
1200 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1201 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1202
1203 size_t step = 2;
1204 armnn::PadDescriptor desc;
1205 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1206 {
1207 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1208 }
1209
1210 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1211 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1212
1213 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1214 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1215
1216 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1217 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1218
1219 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1220 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1221}
1222
Finn Williamsc42c3842019-01-22 14:18:11 +00001223
// Parses a TfLite RELU operator by delegating to the common activation handler.
void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
}
1228
// Parses a TfLite RELU6 operator; RELU6 maps to a bounded ReLu (cap set in ParseActivation).
void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
}
Sadik Armagan58f39192018-09-17 14:14:39 +01001233
// Parses a TfLite LOGISTIC operator as a Sigmoid activation.
void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
}
1238
1239
// Common handler for the standalone TfLite activation operators (RELU, RELU6,
// LOGISTIC). Builds an ActivationDescriptor for the requested function, names the
// layer after the concrete operator, and wires up the single input and output.
void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    boost::ignore_unused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // The name is built up as "Activation:<OP>:<subgraph>:<operator>".
    auto layerName = str(boost::format("Activation:"));
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
            // BoundedReLu clamps to [m_B, m_A]; RELU6 means min(max(x, 0), 6).
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
                                         " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
// Computes the TensorInfo produced by reshaping inputTensorInfo to targetDimsIn.
// At most one target dimension may be -1 (the "stretch" dimension); its extent is
// derived from the total element count. Data type and quantization are preserved.
armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
                                                     const std::vector<int32_t> & targetDimsIn)
{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        // A second -1 would make the shape ambiguous.
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                boost::str(
                    boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
        }

        // Product of the explicit dimensions. Starting the accumulation at -1
        // deliberately cancels the single -1 in the list: (-1) * (-1) * dims = +dims.
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        // The stretch dimension absorbs whatever is left of the element count.
        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    // Keep the input's data type and quantization parameters; only the shape changes.
    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
1327
// Parses a TfLite RESHAPE operator into an ArmNN Reshape layer. The target shape
// comes from the operator's ReshapeOptions (new_shape), resolved through
// OutputShapeOfReshape to handle a -1 stretch dimension.
void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);

    // Check for valid input size and that reshape parameters equal output shape
    // NOTE(review): the shape-consistency check only runs when a second input (the
    // shape tensor) is present, yet options->new_shape is used unconditionally
    // above — confirm this is the intended handling of the two RESHAPE variants.
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1372
// Parses a TfLite CONCATENATION operator into an ArmNN Merger layer.
// Concatenation along the NHWC channel dimension (axis 3) is handled by permuting
// each input to NCHW (SwizzleIn), merging, and permuting back (DeswizzleOut);
// other supported axes connect directly. A fused activation, if any, is appended.
void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Every input tensor contributes one "view" to the merger.
    unsigned int numInputs = static_cast<unsigned int>(inputs.size());
    unsigned int numConcatView = numInputs;

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), MaxNumOfTensorDimensions);
    std::vector<unsigned int>mergeDimSizes(MaxNumOfTensorDimensions, 0u);

    unsigned int mergeDim = 0;

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    // axis could also be negative numbers. Negative axis are interpreted as counting from the end of the rank,
    // i.e., axis + rank(values)-th dimension.
    int32_t inputRank = static_cast<int32_t>(ToTensorInfo(inputs[0]).GetNumDimensions());
    const unsigned int concatDimInput = static_cast<unsigned int>((inputRank + options->axis) % inputRank);

    // ArmNN supports concatenation along the channel dimension for data formats NHWC and NCHW.
    if (concatDimInput == 0 || concatDimInput == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for concatenation is not supported by Armnn. "
                    "Node %2%")
                % concatDimInput
                % CHECK_LOCATION().AsString()));
    }

    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);

        // process the input tensor info
        armnnUtils::ProcessConcatInputTensorInfo(inputTensorInfo, concatDescriptor,
                                                 concatDimInput, viewIndex, mergeDimSizes, mergeDim);
    }

    auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (concatDimInput == 3)
    {
        // Adding Fused Activation Layer after this moment....
        for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
        {
            // add permute layers to swizzle the inputs
            armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
            IConnectableLayer* const swizzleLayer = SwizzleIn(*m_Network, layer, viewIndex, inputTensorInfo);

            BOOST_ASSERT(swizzleLayer != nullptr);

            // register the input connection slots for the layer
            // only the tensors for the inputs are relevant, exclude the const tensors
            RegisterInputSlots(subgraphIndex, operatorIndex, swizzleLayer, {inputTensorIndexes[viewIndex]});
        }

        // add permute layer to deswizzle the output
        IConnectableLayer* const deswizzleLayer = DeswizzleOut(*m_Network, layer, 0, outputTensorInfo);

        // add fused activation layer after the trailing swizzle layer
        layer = AddFusedActivationLayer(deswizzleLayer, 0, options->fused_activation_function);
    }
    else
    {
        // set the layer output tensor info
        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

        // register the input connection slots for the layer, connections are made after all layers have been created
        // only the tensors for the inputs are relevant, exclude the const tensors
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
    }

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1464
// Parses a TfLite FULLY_CONNECTED operator into an ArmNN FullyConnected layer.
// The weights (inputs[1]) and optional bias (inputs[2]) are constant tensors
// consumed at parse time; TfLite stores weights transposed, hence
// m_TransposeWeightMatrix = true. A fused activation, if any, is appended.
void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_TransposeWeightMatrix = true;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Fully Connected Layer accepts two dimensional weights input
    int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
    if (weightsDimension != 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for Fully Connected weights is not supported by Armnn. "
                    "Node %2%")
                % weightsDimension
                % CHECK_LOCATION().AsString()));
    }

    // No permutation needed for the weights: the descriptor's transpose flag handles layout.
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);

    // A third input, when present, is the bias tensor.
    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddFullyConnectedLayer(desc,
                                                  filterTensorAndData.first,
                                                  biasTensorAndData.first,
                                                  layerName.c_str());
    }
    else
    {
        layer = m_Network->AddFullyConnectedLayer(desc,
                                                  filterTensorAndData.first,
                                                  layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slot for the layer
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // we need to add the activation layer and fortunately we don't need to care about the data layout
    armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
                                                                             options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
}
1538
Sadik Armagan58f39192018-09-17 14:14:39 +01001539armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
1540 unsigned int outputSlot,
1541 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01001542{
1543 ActivationDescriptor activationDesc;
1544 std::string layerName = prevLayer->GetName();
1545
1546 switch(activationType)
1547 {
1548 case tflite::ActivationFunctionType_NONE:
1549 {
1550 // this is a no-op: return previous layer
1551 return prevLayer;
1552 }
1553 case tflite::ActivationFunctionType_RELU:
1554 {
1555 activationDesc.m_Function = ActivationFunction::ReLu;
1556 layerName += ":RELU";
1557 break;
1558 }
1559 case tflite::ActivationFunctionType_RELU6:
1560 {
1561 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1562 activationDesc.m_A = 6.0f;
1563 activationDesc.m_B = 0.0f;
1564 layerName += ":RELU6";
1565 break;
1566 }
1567 case tflite::ActivationFunctionType_TANH:
1568 {
1569 activationDesc.m_Function = ActivationFunction::TanH;
1570 activationDesc.m_A = 1.0f;
1571 activationDesc.m_B = 1.0f;
1572 layerName += ":TANH";
1573 break;
1574 }
1575
1576 // I only put these here as a reminder what others we could support
1577 case tflite::ActivationFunctionType_RELU_N1_TO_1:
1578 case tflite::ActivationFunctionType_SIGN_BIT:
1579 default:
1580 {
1581 throw ParseException(
1582 boost::str(
1583 boost::format("TfLite parser doesn't suppport fused activation: "
1584 "%1%/%2% %3% ") %
1585 activationType %
1586 tflite::EnumNameActivationFunctionType(activationType) %
1587 CHECK_LOCATION().AsString()));
1588
1589 }
1590 }
1591
1592 IConnectableLayer* activationLayer =
1593 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1594
1595 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
1596 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
1597 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
1598 return activationLayer;
1599}
1600
1601TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
1602{
1603 if (fileName == nullptr)
1604 {
1605 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
1606 CHECK_LOCATION().AsString()));
1607 }
1608 boost::system::error_code errorCode;
1609 boost::filesystem::path pathToFile(fileName);
1610 if (!boost::filesystem::exists(pathToFile, errorCode))
1611 {
1612 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
1613 fileName %
1614 errorCode %
1615 CHECK_LOCATION().AsString()));
1616 }
1617 std::ifstream file(fileName, std::ios::binary);
1618 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
1619 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
1620 fileContent.size());
1621}
1622
1623TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
1624{
1625 if (binaryContent == nullptr)
1626 {
1627 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
1628 CHECK_LOCATION().AsString()));
1629 }
1630 flatbuffers::Verifier verifier(binaryContent, len);
1631 if (verifier.VerifyBuffer<tflite::Model>() == false)
1632 {
1633 throw ParseException(
1634 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
1635 "flatbuffers format. size:%1% %2%") %
1636 len %
1637 CHECK_LOCATION().AsString()));
1638 }
1639 return tflite::UnPackModel(binaryContent);
1640}
1641
1642TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
1643 size_t subgraphIndex,
1644 size_t operatorIndex)
1645{
1646 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1647
1648 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1649 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1650
1651 size_t inputCount = operatorPtr->inputs.size();
1652 TensorRawPtrVector result(inputCount);
1653 for (size_t i=0; i<inputCount; ++i)
1654 {
1655 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
1656 result[i] = subGraphPtr->tensors[inputId].get();
1657 }
1658 return result;
1659}
1660
1661TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
1662 size_t subgraphIndex,
1663 size_t operatorIndex)
1664{
1665 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1666
1667 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1668 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1669
1670 size_t outputCount = operatorPtr->outputs.size();
1671 TensorRawPtrVector result(outputCount);
1672 for (size_t i=0; i<outputCount; ++i)
1673 {
1674 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
1675 CHECK_TENSOR(model, subgraphIndex, outputId);
1676 result[i] = subGraphPtr->tensors[outputId].get();
1677 }
1678 return result;
1679}
1680
1681TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
1682 size_t subgraphIndex)
1683{
1684 CHECK_SUBGRAPH(model, subgraphIndex);
1685 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1686
1687 size_t inputCount = subGraphPtr->inputs.size();
1688 TensorIdRawPtrVector result(inputCount);
1689 for (size_t i=0; i<inputCount; ++i)
1690 {
1691 uint32_t inputId = CHECKED_NON_NEGATIVE(subGraphPtr->inputs[i]);
1692 CHECK_TENSOR(model, subgraphIndex, inputId);
1693 result[i] = std::make_pair(inputId, subGraphPtr->tensors[inputId].get());
1694 }
1695 return result;
1696}
1697
1698TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
1699 size_t subgraphIndex)
1700{
1701 CHECK_SUBGRAPH(model, subgraphIndex);
1702 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1703
1704 size_t outputCount = subGraphPtr->outputs.size();
1705 TensorIdRawPtrVector result(outputCount);
1706 for (size_t i=0; i<outputCount; ++i)
1707 {
1708 uint32_t outputId = CHECKED_NON_NEGATIVE(subGraphPtr->outputs[i]);
1709 result[i] = std::make_pair(outputId, subGraphPtr->tensors[outputId].get());
1710 }
1711 return result;
1712}
1713
1714std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
1715 size_t subgraphIndex,
1716 size_t operatorIndex)
1717{
1718 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1719 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1720 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1721 return operatorPtr->inputs;
1722}
1723
1724std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
1725 size_t subgraphIndex,
1726 size_t operatorIndex)
1727{
1728 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1729 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1730 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1731 return operatorPtr->outputs;
1732}
1733
1734void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
1735 size_t operatorIndex,
1736 IConnectableLayer* layer,
1737 const std::vector<unsigned int>& tensorIndexes)
1738{
1739 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1740 BOOST_ASSERT(layer != nullptr);
1741 if (tensorIndexes.size() != layer->GetNumInputSlots())
1742 {
1743 throw ParseException(
1744 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
1745 " for subgraph:%3% operator index:%4% %5%") %
1746 tensorIndexes.size() %
1747 layer->GetNumInputSlots() %
1748 subgraphIndex %
1749 operatorIndex %
1750 CHECK_LOCATION().AsString()));
1751 }
1752
1753 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
1754 {
1755 unsigned int tensorIndex = tensorIndexes[slotIndex];
1756 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
1757 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
1758 }
1759}
1760
1761void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
1762 size_t operatorIndex,
1763 IConnectableLayer* layer,
1764 const std::vector<unsigned int>& tensorIndexes)
1765{
1766 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1767 BOOST_ASSERT(layer != nullptr);
1768 if (tensorIndexes.size() != layer->GetNumOutputSlots())
1769 {
1770 throw ParseException(
1771 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
1772 " for subgraph:%3% operator index:%4% %5%") %
1773 tensorIndexes.size() %
1774 layer->GetNumOutputSlots() %
1775 subgraphIndex %
1776 operatorIndex %
1777 CHECK_LOCATION().AsString()));
1778 }
1779
1780 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1781 {
1782 unsigned int tensorIndex = tensorIndexes[slotIndex];
1783 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
1784 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
1785 }
1786}
1787
1788void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
1789{
1790 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1791
1792 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
1793 for (auto const & tensorIdAndPtr : inputs)
1794 {
1795 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1796 IConnectableLayer* layer =
1797 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1798
1799 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
1800 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1801
1802 RegisterOutputSlots(subgraphIndex,
1803 VIRTUAL_OPERATOR_ID,
1804 layer,
1805 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1806 }
1807}
1808
1809void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
1810{
1811 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1812
1813 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
1814 for (auto const & tensorIdAndPtr : outputs)
1815 {
1816 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1817 IConnectableLayer* layer =
1818 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1819
1820 RegisterInputSlots(subgraphIndex,
1821 VIRTUAL_OPERATOR_ID,
1822 layer,
1823 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1824 }
1825}
1826
1827// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
1828TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
1829{
1830 CHECK_BUFFER(model, bufferIndex);
1831 return model->buffers[bufferIndex].get();
1832}
1833
Matteo Martincigh747ef822018-12-18 09:26:39 +00001834template<typename T>
1835std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
1836TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
1837 TfLiteParser::TensorRawPtr tensorPtr,
1838 armnn::TensorInfo& tensorInfo,
1839 armnn::Optional<armnn::PermutationVector&> permutationVector)
1840{
1841 auto constData = CreateConstTensorImpl<T>(bufferPtr,
1842 tensorPtr,
1843 tensorInfo,
1844 permutationVector);
1845 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
1846 return std::make_pair(constData.first, std::move(storage));
1847}
1848
telsoa01c577f2c2018-08-31 09:22:23 +01001849std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
1850TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00001851 armnn::TensorInfo& tensorInfo,
1852 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01001853{
1854 CHECK_TENSOR_PTR(tensorPtr);
1855 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
1856 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
1857
1858 switch (tensorInfo.GetDataType())
1859 {
1860 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001861 return CreateConstTensorAndStoreData<float>(bufferPtr,
1862 tensorPtr,
1863 tensorInfo,
1864 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001865 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001866 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
1867 tensorPtr,
1868 tensorInfo,
1869 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001870 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001871 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
1872 tensorPtr,
1873 tensorInfo,
1874 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001875 default:
1876 {
1877 std::stringstream errString;
1878 errString << "Unexpected datatype when creating const tensor: "
1879 << armnn::GetDataTypeName(tensorInfo.GetDataType())
1880 << " shape:" << tensorInfo.GetShape()
1881 << CHECK_LOCATION().AsString();
1882 throw ParseException(errString.str());
1883 }
1884 }
1885}
1886
1887BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
1888 const std::string& name) const
1889{
1890 CHECK_SUBGRAPH(m_Model, subgraphId);
1891 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
1892 for (auto const & input : inputs)
1893 {
1894 if (input.second->name == name)
1895 {
1896 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
1897 return std::make_pair(bindingId, ToTensorInfo(input.second));
1898 }
1899 }
1900
1901 std::stringstream bindings;
1902 for (auto const & input : inputs)
1903 {
1904 bindings << "'" << input.second->name << "' ";
1905 }
1906
1907 throw ParseException(
1908 boost::str(
1909 boost::format("No input binding found for subgraph:%1% and name:%2%. "
1910 "Possible inputs are: [%3%] %4%") %
1911 subgraphId %
1912 name %
1913 bindings.str() %
1914 CHECK_LOCATION().AsString()));
1915}
1916
1917BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
1918 const std::string& name) const
1919{
1920 CHECK_SUBGRAPH(m_Model, subgraphId);
1921 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
1922 for (auto const & output : outputs)
1923 {
1924 if (output.second->name == name)
1925 {
1926 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
1927 return std::make_pair(bindingId, ToTensorInfo(output.second));
1928 }
1929 }
1930
1931 std::stringstream bindings;
1932 for (auto const & output : outputs)
1933 {
1934 bindings << "'" << output.second->name << "' ";
1935 }
1936
1937 throw ParseException(
1938 boost::str(
1939 boost::format("No output binding found for subgraph:%1% and name:%2%. "
1940 "Possible outputs are: [%3%] %4%") %
1941 subgraphId %
1942 name %
1943 bindings.str() %
1944 CHECK_LOCATION().AsString()));
1945}
1946
1947size_t TfLiteParser::GetSubgraphCount() const
1948{
1949 return m_Model->subgraphs.size();
1950}
1951
1952std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
1953{
1954 CHECK_SUBGRAPH(m_Model, subgraphId);
1955 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
1956 std::vector<std::string> result;
1957 result.reserve(inputs.size());
1958 for (auto const & input : inputs)
1959 {
1960 result.push_back(input.second->name);
1961 }
1962 return result;
1963}
1964
1965std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
1966{
1967 CHECK_SUBGRAPH(m_Model, subgraphId);
1968 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
1969 std::vector<std::string> result;
1970 result.reserve(outputs.size());
1971 for (auto const & output : outputs)
1972 {
1973 result.push_back(output.second->name);
1974 }
1975 return result;
1976}
1977
1978ITfLiteParser* ITfLiteParser::CreateRaw()
1979{
1980 return new TfLiteParser();
1981}
1982
1983ITfLiteParserPtr ITfLiteParser::Create()
1984{
1985 return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
1986}
1987
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    // Matching deleter for CreateRaw()/Create(). Deleting a null pointer is
    // a no-op, so callers need not check. NOTE(review): assumes ITfLiteParser
    // declares a virtual destructor — confirm in the header.
    delete parser;
}
1992
1993TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
1994: m_FloatData(std::move(data))
1995, m_Uint8Data(nullptr)
1996, m_Int32Data(nullptr)
1997{
1998}
1999
2000TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2001: m_FloatData(nullptr)
2002, m_Uint8Data(std::move(data))
2003, m_Int32Data(nullptr)
2004{
2005}
2006
2007TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2008: m_FloatData(nullptr)
2009, m_Uint8Data(nullptr)
2010, m_Int32Data(std::move(data))
2011{
2012}
2013
2014} // armnnTfLiteParser