blob: affd858d77832fc2c86a7d6f56dd3fde16bde000 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5#include "TfLiteParser.hpp"
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Exceptions.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <boost/filesystem.hpp>
11
12// armnnUtils:
Sadik Armagan479045b2018-10-01 11:51:37 +010013#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010014#include <Permute.hpp>
15#include <VerificationHelpers.hpp>
16
17// The generated code based on the Tf Lite schema:
18#include <schema_generated.h>
19
20#include <boost/core/ignore_unused.hpp>
21#include <boost/assert.hpp>
22#include <boost/format.hpp>
23#include <boost/log/trivial.hpp>
24
25#include <fstream>
26#include <algorithm>
27#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010028#include <numeric>
telsoa01c577f2c2018-08-31 09:22:23 +010029
30using namespace armnn;
31using armnn::CheckLocation;
32namespace armnnTfLiteParser
33{
34namespace
35{
// Permutation vectors used to convert tensors between the NHWC layout used by
// TensorFlow Lite and the NCHW ordering used internally by ArmNN here.
const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
38
jimfly01c25411c2018-11-14 17:47:22 +000039IConnectableLayer* SwizzleIn(INetwork& network,
40 IConnectableLayer* layer,
41 unsigned int inputSlotIndex,
42 const TensorInfo & inputInfo)
43{
44 BOOST_ASSERT(layer != nullptr);
45 // Add swizzle layer
46 std::stringstream name;
47 name << "swizzle_for-" << layer->GetName() << ":in" << inputSlotIndex;
48 IConnectableLayer* const swizzleLayer = network.AddPermuteLayer(NHWCToArmNN, name.str().c_str());
49 // Set swizzled output shape
50 const TensorInfo swizzleOutInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
51 swizzleLayer->GetOutputSlot(0).SetTensorInfo(swizzleOutInfo);
52 // Connect the swizzle layer to the actual layer
53 swizzleLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(inputSlotIndex));
54
55 return swizzleLayer;
56}
57
58IConnectableLayer* DeswizzleOut(INetwork& network,
59 IConnectableLayer* layer,
60 unsigned int outputSlotIndex,
61 const TensorInfo & outputInfo)
62{
63 BOOST_ASSERT(layer != nullptr);
64 // Add deswizzle layer
65 std::stringstream name;
66 name << "deswizzle_for-" << layer->GetName() << ":out" << outputSlotIndex;
67 IConnectableLayer* const deswizzleLayer = network.AddPermuteLayer(ArmNNToNHWC, name.str().c_str());
68 // Set deswizzled output shape
69 deswizzleLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
70 // Set original layer output shape
71 const TensorInfo deswizzleOutInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
72 layer->GetOutputSlot(outputSlotIndex).SetTensorInfo(deswizzleOutInfo);
73 // Connect the actual layer to the deswizzle layer
74 layer->GetOutputSlot(outputSlotIndex).Connect(deswizzleLayer->GetInputSlot(0));
75
76 return deswizzleLayer;
77}
78
// Marker operator index used when a check refers to the subgraph as a whole
// rather than to a concrete operator.
const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

// Throws ParseException when the model pointer is null or subgraphIndex is out
// of range; `location` identifies the calling check site for the message.
void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                              location.m_Function %
                              subgraphIndex %
                              location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                              location.m_Function %
                              subgraphIndex %
                              location.FileLine()));
    }
}

// Convenience wrapper that captures the caller's source location.
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
110
111void CheckModel(const TfLiteParser::ModelPtr & model,
112 size_t subgraphIndex,
113 size_t operatorIndex,
114 const CheckLocation & location)
115{
116 if (model.get() == nullptr)
117 {
118 throw ParseException(
119 boost::str(
120 boost::format("%1% was called with invalid (null) model. "
121 "Possible reason is that the model is not yet loaded and Unpack(ed). "
122 "subgraph:%2% operator:%3% at %4%") %
123 location.m_Function %
124 subgraphIndex %
125 operatorIndex %
126 location.FileLine()));
127 }
128 else if (subgraphIndex >= model->subgraphs.size())
129 {
130 throw ParseException(
131 boost::str(
132 boost::format("%1% was called with an invalid subgraph index. "
133 "subgraph:%2% operator:%3% at %4%") %
134 location.m_Function %
135 subgraphIndex %
136 operatorIndex %
137 location.FileLine()));
138 }
139 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
140 operatorIndex != VIRTUAL_OPERATOR_ID)
141 {
142 throw ParseException(
143 boost::str(
144 boost::format("%1% was called with an invalid operator index. "
145 "subgraph:%2% operator:%3% at %4%") %
146 location.m_Function %
147 subgraphIndex %
148 operatorIndex %
149 location.FileLine()));
150 }
151}
152
153#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
154 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
155
// Validates that tensorIndex refers to an existing tensor of the subgraph.
// Model and subgraph validity are assumed to have been established by
// CHECK_MODEL already, hence asserts (not exceptions) for those two.
void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // not checking model, because I assume CHECK_MODEL already run
    // and checked that. An assert would do.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // also subgraph index should be checked by CHECK_MODEL so
    // I only add an assert here
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // the tensor index is the only one to check here
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              tensorIndex %
                              location.FileLine()));
    }
}

// Convenience wrapper that captures the caller's source location.
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
185
186void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
187 const CheckLocation & location)
188{
189 if (rawPtr == nullptr)
190 {
191 throw ParseException(
192 boost::str(
193 boost::format("%1% was called with a null tensor pointer. "
194 "at %2%") %
195 location.m_Function %
196 location.FileLine()));
197
198 }
199}
200
201#define CHECK_TENSOR_PTR(TENSOR_PTR) \
202 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
203
204void CheckBuffer(const TfLiteParser::ModelPtr & model,
205 size_t bufferIndex,
206 const CheckLocation & location)
207{
208 if (model.get() == nullptr)
209 {
210 throw ParseException(
211 boost::str(
212 boost::format("%1% was called with invalid (null) model. "
213 "Possible reason is that the model is not yet loaded and Unpack(ed). "
214 "buffer:%2% at %3%") %
215 location.m_Function %
216 bufferIndex %
217 location.FileLine()));
218 }
219 else if (bufferIndex >= model->buffers.size())
220 {
221 throw ParseException(
222 boost::str(
223 boost::format("%1% was called with an invalid buffer index. "
224 "buffer index:%2% at %3%") %
225 location.m_Function %
226 bufferIndex %
227 location.FileLine()));
228 }
229 else if (model->buffers[bufferIndex].get() == nullptr)
230 {
231 throw ParseException(
232 boost::str(
233 boost::format("The buffer #%1% is null. %3%") %
234 bufferIndex %
235 location.AsString()));
236 }
237}
238
239#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
240 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
241
// Validates that the raw data buffer backing a tensor is non-null and large
// enough (in both element count and byte size) to hold the data described by
// tensorInfo. Throws ParseException otherwise.
void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                              bufferId %
                              location.AsString()));
    }
    // data.size() is in bytes; both the element-count and byte-count checks are
    // kept as written — the byte check is the stricter of the two.
    else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
        tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

// Convenience wrapper that captures the caller's source location.
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
269
270bool IsActivationSupported(tflite::ActivationFunctionType activationType)
271{
272 switch(activationType)
273 {
274 case tflite::ActivationFunctionType_NONE:
275 case tflite::ActivationFunctionType_RELU:
276 case tflite::ActivationFunctionType_RELU6:
277 case tflite::ActivationFunctionType_TANH:
278 {
279 return true;
280 }
281 default:
282 {
283 return false;
284 }
285 }
286}
287
288#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
289 do { \
290 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
291 { \
292 throw ParseException( \
293 boost::str( \
294 boost::format("TfLite parser doesn't suppport fused activation: " \
295 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
296 OPTION->fused_activation_function % \
297 tflite::EnumNameActivationFunctionType(\
298 OPTION->fused_activation_function) % \
299 __func__ % \
300 SUBGRAPH_INDEX % \
301 OPERATOR_INDEX % \
302 CHECK_LOCATION().FileLine())); \
303 } \
304 } while(false)
305
306
307std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
308{
309 std::vector<unsigned int> result;
310 result.reserve(in.size());
311 for (auto & i : in)
312 {
313 result.push_back(CHECKED_NON_NEGATIVE(i));
314 }
315 return result;
316}
317
318void CalcPadding(uint32_t inputSize,
319 uint32_t filterSize,
320 uint32_t stride,
321 uint32_t& paddingFront,
322 uint32_t& paddingBack,
323 tflite::Padding padding)
324{
325 paddingFront = 0;
326 paddingBack = 0;
327 if (padding == tflite::Padding_SAME)
328 {
329 uint32_t outputSize = (inputSize + stride - 1) / stride;
330 uint32_t temp = (outputSize - 1) * stride + filterSize;
331 if (temp > inputSize)
332 {
333 paddingFront = (temp - inputSize) / 2;
334 paddingBack = (temp - inputSize) - paddingFront;
335 }
336 }
337}
338
// Builds an armnn::TensorInfo (data type, shape and quantisation parameters)
// from a tensor as described in the flatbuffer model.
// Throws ParseException for data types the parser does not support.
armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                                  tensorPtr->type %
                                  tflite::EnumNameTensorType(tensorPtr->type) %
                                  tensorPtr->name %
                                  location.AsString()));
        }
    }

    // Defaults used when the tensor carries no quantisation block.
    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        // Only per-tensor quantisation (at most one scale / zero point) is accepted.
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32
            // but this is what we support at the moment in ArmNN
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    // Throws if any dimension in the flatbuffer shape is negative.
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(tensorPtr->shape.size()),
                             dimensions.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}
399
// Copies (and optionally permutes) the raw buffer backing a flatbuffer tensor
// into freshly allocated storage and wraps it in an armnn::ConstTensor.
//
// @param bufferPtr         raw data buffer from the model; must not be null.
// @param tensorPtr         flatbuffer tensor description; must not be null.
// @param tensorInfo        in/out: replaced by the permuted info when a
//                          permutation vector is supplied.
// @param permutationVector optional mapping applied to both the shape and the
//                          data (e.g. to reorder depthwise-conv weights).
// @return the ConstTensor and the unique_ptr that owns its storage; the caller
//         must keep the unique_ptr alive for as long as the tensor is in use.
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        // Permute the shape first, then rearrange the raw data to match it.
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        // No permutation requested: plain byte-for-byte copy.
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
427
telsoa01c577f2c2018-08-31 09:22:23 +0100428armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
429{
430 // generate the binding id by shifting the tensor id by 8 bit
431 // and add the subgraph id, which allows 256 subgraphs
432 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
433}
434
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000435bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
436{
437 const unsigned int actualSize = actual.GetNumDimensions();
438 if (actualSize != expected.size())
439 {
440 return false;
441 }
442
443 for (unsigned int i = 0u; i < actualSize; i++)
444 {
445 if (expected[i] < 0 ||
446 actual[i] != static_cast<unsigned int>(expected[i]))
447 {
448 return false;
449 }
450 }
451
452 return true;
453}
454
telsoa01c577f2c2018-08-31 09:22:23 +0100455} // <anonymous>
456
// Constructs the parser with an empty network. The parser-function table is
// indexed by tflite::BuiltinOperator; every entry defaults to
// ParseUnsupportedOperator and supported operators overwrite their slot below.
TfLiteParser::TfLiteParser()
: m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
}
478
479void TfLiteParser::ResetParser()
480{
481 m_Network = armnn::INetworkPtr(nullptr, nullptr);
482 m_Model = nullptr;
483 m_SubgraphConnections.clear();
484}
485
// Broadcast helper for binary elementwise ops (ADD/MUL): when the two inputs
// have different ranks, inserts a reshape in front of the lower-rank input so
// that both inputs reach @p layer with the same number of dimensions. The
// smaller shape is right-aligned and padded with leading 1s.
// NOTE(review): the reshaped (lower-rank) input is always wired to slot 0,
// which swaps operand order when input 1 has the lower rank — harmless for
// commutative ops like ADD/MUL; confirm before reusing for others.
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subGraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    // Start by assuming input 0 is the one that needs the reshape...
    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subGraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subGraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    // ...and swap the roles if input 1 actually has the lower rank.
    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    // Right-align the smaller shape into a vector of 1s of the larger rank,
    // e.g. rank-2 [3,4] broadcast against rank 4 becomes [1,1,3,4].
    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    // Wire the reshape output into input slot 0 of the wrapped layer.
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    // The unmodified (higher-rank) input feeds slot 1 directly.
    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
542
// Loads a .tflite flatbuffer from disk and converts it into an INetwork.
// Resets all parser state first so the instance can be reused.
INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}
549
// Converts an in-memory .tflite flatbuffer into an INetwork.
// Resets all parser state first so the instance can be reused.
INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
556
// Translates the loaded flatbuffer model (m_Model) into an INetwork.
// Currently restricted to models with exactly one subgraph. Operator parse
// failures are collected and reported together in a single ParseException.
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                              m_Model->subgraphs.size() %
                              CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubGraphPtr const & subgraph : m_Model->subgraphs)
    {
        // One connection record per tensor of the subgraph; producers/consumers
        // are registered by the per-operator parse functions.
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                if (op->custom_options.size() > 0)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Custom options for op: %1% is not supported. "
                                          "It has %2% bytes of custom options. %3%") %
                                          op->opcode_index %
                                          op->custom_options.size() %
                                          CHECK_LOCATION().AsString()));
                }

                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                                          builtinCode %
                                          tflite::BuiltinOperator_MAX %
                                          subgraphIndex %
                                          operatorIndex %
                                          CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                // Record the failure but keep going so that all broken
                // operators of the model are reported in one pass.
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // Only connect tensors that have a registered producer; each
            // producer may feed any number of consumer slots.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                    inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                    ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}
662
// Records @p slot as the unique producer of the given tensor; the actual
// output->input connections are made later in CreateNetworkFromModel once all
// layers exist. Throws if a producer was already registered for the tensor.
void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
                boost::format("Another layer has already registered itself as the producer of "
                              "subgraph:%1% tensor:%2% %3%") %
                               subgraphIndex %
                               tensorIndex %
                               CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}
686
687void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
688 size_t tensorIndex,
689 armnn::IInputSlot* slot)
690{
691 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
692 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
693 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
694
695 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
696 tensorSlots.inputSlots.push_back(slot);
697}
698
// Fallback handler installed for every builtin operator without a dedicated
// parse function: names the offending opcode and throws.
void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    // Resolve the opcode so the error message can identify the operator.
    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    throw ParseException(
        boost::str(
            boost::format("Operator not supported. "
                          "subgraph:%1% operator:%2% "
                          "opcode_index:%3% opcode:%4% / %5% %6%") %
                          subgraphIndex %
                          operatorIndex %
                          opcodeIndex %
                          opcode %
                          tflite::EnumNameBuiltinOperator(opcode) %
                          CHECK_LOCATION().AsString()));
}
719
// Converts a TfLite CONV_2D operator into an armnn Convolution2d layer, plus
// an optional activation layer when the op carries a fused activation.
// Inputs: [input, filter] or [input, filter, bias]; exactly one output.
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // Empty permutation vector: the filter data is used as stored in the model.
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 biasTensorAndData.first,
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Insert the fused activation (if any) between the conv output and its consumers.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
797
// Converts a TfLite DEPTHWISE_CONV_2D operator into an armnn
// DepthwiseConvolution2d layer (plus an optional fused-activation layer).
// The weight tensor is reshaped and permuted from the TfLite [1, H, W, I*M]
// layout into the [M, I, H, W] layout ArmNN expects.
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
    CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // The permutation vector makes CreateConstTensor reorder the weight data.
    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasTensorAndData.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Insert the fused activation (if any) between the conv output and its consumers.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
880
// Handler for the AVERAGE_POOL_2D operator: delegates to the shared pooling
// parser with the Average algorithm selected.
void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
885
// Handler for the MAX_POOL_2D operator: delegates to the shared pooling
// parser with the Max algorithm selected.
void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
890
891void TfLiteParser::ParsePool(size_t subgraphIndex,
892 size_t operatorIndex,
893 PoolingAlgorithm algorithm)
894{
895 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
896
897 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
898 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
899
900 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
901
902 std::string layerName;
903
904 switch (algorithm)
905 {
906 case PoolingAlgorithm::Average:
907 layerName =
908 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
909 break;
910 case PoolingAlgorithm::Max:
911 layerName =
912 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
913 break;
914 default:
915 BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
916 }
917
918 Pooling2dDescriptor desc;
919
920 desc.m_PoolType = algorithm;
921 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
922 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
923 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
924 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
925 desc.m_PaddingMethod = PaddingMethod::Exclude;
926 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +0000927 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100928
929 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
930 CHECK_VALID_SIZE(inputs.size(), 1);
931 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
932
933 // assuming input is NHWC
934 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
935 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
936
937 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
938 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);
939
940 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
941 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100942
943 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
944
945 BOOST_ASSERT(layer != nullptr);
946
jimfly01c25411c2018-11-14 17:47:22 +0000947 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
948 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100949
950 // register the input connection slots for the layer, connections are made after all layers have been created
951 // only the tensors for the inputs are relevant, exclude the const tensors
952 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000953 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100954
jimfly01c25411c2018-11-14 17:47:22 +0000955 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100956 // register the output connection slots for the layer, connections are made after all layers have been created
957 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
958 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
959}
960
telsoa01c577f2c2018-08-31 09:22:23 +0100961void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
962{
963 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
964 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
965 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
966
967 SoftmaxDescriptor desc;
968 desc.m_Beta = options->beta;
969
970 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
971 CHECK_VALID_SIZE(inputs.size(), 1);
972 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
973 CHECK_VALID_SIZE(outputs.size(), 1);
974
975 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
976 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
977
978 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
979 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
980
981 // register the input connection slots for the layer, connections are made after all layers have been created
982 // only the tensors for the inputs are relevant, exclude the const tensors
983 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
984 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
985
986 // register the output connection slots for the layer, connections are made after all layers have been created
987 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
988 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
989}
990
991armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
992 const armnn::TensorInfo & inputTensorInfo)
993{
994 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
995 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
996 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
997
998 if (inputTensorInfo.GetNumDimensions() > 4)
999 {
1000 std::stringstream ss;
1001 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1002 << " shape:" << inputTensorInfo.GetShape() << " "
1003 << CHECK_LOCATION().AsString();
1004 throw ParseException(ss.str());
1005 }
1006
1007 if (squeezeDims.empty())
1008 {
1009 squeezeDims.assign(dimensionSequence,
1010 dimensionSequence+inputTensorInfo.GetNumDimensions());
1011 }
1012
1013 std::vector<uint32_t> outputDims;
1014 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1015 {
1016 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1017 auto currentDimension = inputTensorInfo.GetShape()[i];
1018 if (skipSqueeze || currentDimension != 1)
1019 {
1020 outputDims.push_back(currentDimension);
1021 }
1022 }
1023
1024 if (outputDims.size() > 4)
1025 {
1026 std::stringstream ss;
1027 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1028 << " shape:" << inputTensorInfo.GetShape() << " "
1029 << CHECK_LOCATION().AsString();
1030 throw ParseException(ss.str());
1031 }
1032
1033 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1034 outputDims.data());
1035
1036 // we need to preserve the tensor type and the quantization data as well
1037 TensorInfo outTensorInfo = inputTensorInfo;
1038 outTensorInfo.SetShape(outShape);
1039
1040 return outTensorInfo;
1041}
1042
1043void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1044{
1045 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1046
1047 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1048 CHECK_VALID_SIZE(inputs.size(), 1);
1049
1050 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1051 CHECK_VALID_SIZE(outputs.size(), 1);
1052
1053 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1054 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1055
1056 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1057 armnn::TensorInfo outputTensorInfo =
1058 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1059 inputTensorInfo);
1060
1061 ReshapeDescriptor reshapeDesc;
1062 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1063
1064 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1065 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1066 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1067
1068 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1069 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1070
1071 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1072 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1073}
1074
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001075void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1076{
1077 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1078
1079 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1080 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1081
1082 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1083 CHECK_VALID_SIZE(inputs.size(), 2);
1084
1085 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1086 CHECK_VALID_SIZE(outputs.size(), 1);
1087
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001088 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1089 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1090
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001091 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1092 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1093
1094 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1095 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1096
1097 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001098 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1099 {
1100 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1101 }
1102 else
1103 {
1104 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1105 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001106
1107 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1108
1109 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1110 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1111}
1112
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001113void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1114{
1115 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1116
1117 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1118 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1119
1120 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1121 CHECK_VALID_SIZE(inputs.size(), 2);
1122
1123 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1124 CHECK_VALID_SIZE(outputs.size(), 1);
1125
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001126 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1127 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1128
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001129 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1130 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1131
1132 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1133 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1134
1135 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001136 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1137 {
1138 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1139 }
1140 else
1141 {
1142 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1143 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001144
1145 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1146
1147 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1148 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1149}
1150
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001151void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1152{
1153 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1154
1155 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1156
1157 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1158 CHECK_VALID_SIZE(outputs.size(), 1);
1159
1160 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1161 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1162
1163 armnn::MeanDescriptor desc;
1164 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1165 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1166 desc.m_Axis = axis;
1167
1168 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1169 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1170
1171 desc.m_KeepDims =
1172 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1173 true : false;
1174
1175 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1176 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1177
1178 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1179
1180 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1181 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1182
1183 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1184 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1185}
1186
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001187void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1188{
1189 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1190
1191 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1192
1193 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1194 CHECK_VALID_SIZE(outputs.size(), 1);
1195
1196 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1197 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1198
1199 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1200 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1201
1202 size_t step = 2;
1203 armnn::PadDescriptor desc;
1204 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1205 {
1206 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1207 }
1208
1209 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1210 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1211
1212 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1213 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1214
1215 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1216 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1217
1218 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1219 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1220}
1221
Sadik Armagan58f39192018-09-17 14:14:39 +01001222void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1223{
1224 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1225
1226 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1227 boost::ignore_unused(operatorPtr);
1228
1229 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1230 CHECK_VALID_SIZE(inputs.size(), 1);
1231
1232 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1233 CHECK_VALID_SIZE(outputs.size(), 1);
1234
1235 auto layerName = str(boost::format("Activation:RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1236 ActivationDescriptor activationDesc;
1237 activationDesc.m_Function = ActivationFunction::ReLu;
1238 IConnectableLayer* const layer =
1239 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1240
1241 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1242 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1243
1244 // register the input connection slots for the layer, connections are made after all layers have been created
1245 // only the tensors for the inputs are relevant, exclude the const tensors
1246 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1247 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1248
1249 // register the output connection slots for the layer, connections are made after all layers have been created
1250 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1251 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1252}
1253
1254void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1255{
1256 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1257
1258 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1259 boost::ignore_unused(operatorPtr);
1260
1261 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1262 CHECK_VALID_SIZE(inputs.size(), 1);
1263
1264 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1265 CHECK_VALID_SIZE(outputs.size(), 1);
1266
1267 auto layerName = str(boost::format("Activation:RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1268 ActivationDescriptor activationDesc;
1269 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1270 activationDesc.m_A = 6.0f;
1271 activationDesc.m_B = 0.0f;
1272 IConnectableLayer* const layer =
1273 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1274
1275 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1276 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1277
1278 // register the input connection slots for the layer, connections are made after all layers have been created
1279 // only the tensors for the inputs are relevant, exclude the const tensors
1280 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1281 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1282
1283 // register the output connection slots for the layer, connections are made after all layers have been created
1284 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1285 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1286}
1287
// Computes the output TensorInfo of a RESHAPE operator from its target
// dimensions. At most one target dimension may be -1 ("stretch"); its size is
// inferred so the element count matches the input. The result keeps the
// input's data type and quantization parameters.
// Throws ParseException if more than one target dimension is -1.
armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
                                                     const std::vector<int32_t> & targetDimsIn)
{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        // A second -1 anywhere after the first is invalid.
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                boost::str(
                    boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
        }

        // Multiply ALL target dims starting from -1: the initial -1 and the
        // single -1 stretch placeholder cancel out, leaving the product of the
        // fixed dimensions only.
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        // The stretch dimension becomes whatever is left over.
        // NOTE(review): integer division — assumes the input element count is
        // an exact multiple of the fixed dims' product; no remainder check here.
        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    // Preserve the tensor type and the quantization data of the input.
    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
1318
1319void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1320{
1321 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1322
1323 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001324
1325 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1326 CHECK_VALID_SIZE(outputs.size(), 1);
1327
1328 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1329 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1330
1331 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001332 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1333 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001334 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1335
kevmay0171972a82018-12-17 14:28:03 +00001336 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001337 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1338 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001339 {
1340 std::stringstream ss;
1341 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001342 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001343 << " does not equal output shape "
1344 << actualOutputTensorInfo.GetShape()
1345 << ": "
1346 << CHECK_LOCATION().AsString();
1347 throw ParseException(ss.str());
1348 }
1349
Sadikb94967b2018-09-19 15:30:00 +01001350 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001351 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001352
1353 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1354 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001355 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001356
1357 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1358 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1359
1360 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1361 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1362}
1363
// Handler for the CONCATENATION operator. Builds an OriginsDescriptor over all
// input views and adds a merger layer. When the concatenation axis is the NHWC
// channel dimension (3), each input is permuted to NCHW first (SwizzleIn) and
// the merged output is permuted back (DeswizzleOut), because ArmNN merges
// along the channel dimension.
void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    unsigned int numInputs = static_cast<unsigned int>(inputs.size());
    unsigned int numConcatView = numInputs;

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), MaxNumOfTensorDimensions);
    std::vector<unsigned int>mergeDimSizes(MaxNumOfTensorDimensions, 0u);

    unsigned int mergeDim = 0;

    // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
    // axis could also be negative numbers. Negative axis are interpreted as counting from the end of the rank,
    // i.e., axis + rank(values)-th dimension.
    int32_t inputRank = static_cast<int32_t>(ToTensorInfo(inputs[0]).GetNumDimensions());
    const unsigned int concatDimInput = static_cast<unsigned int>((inputRank + options->axis) % inputRank);

    // ArmNN supports concatenation along the channel dimension for data formats NHWC and NCHW.
    if (concatDimInput == 0 || concatDimInput == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for concatenation is not supported by Armnn. "
                    "Node %2%")
                % concatDimInput
                % CHECK_LOCATION().AsString()));
    }

    // Accumulate each view's origin/size into the descriptor.
    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);

        // process the input tensor info
        armnnUtils::ProcessConcatInputTensorInfo(inputTensorInfo, concatDescriptor,
                                                 concatDimInput, viewIndex, mergeDimSizes, mergeDim);
    }

    auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (concatDimInput == 3)
    {
        // Channel-axis concat in NHWC: permute each input to NCHW before the
        // merger, permute the merged result back afterwards.
        // Adding Fused Activation Layer after this moment....
        for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
        {
            // add permute layers to swizzle the inputs
            armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
            IConnectableLayer* const swizzleLayer = SwizzleIn(*m_Network, layer, viewIndex, inputTensorInfo);

            BOOST_ASSERT(swizzleLayer != nullptr);

            // register the input connection slots for the layer
            // only the tensors for the inputs are relevant, exclude the const tensors
            RegisterInputSlots(subgraphIndex, operatorIndex, swizzleLayer, {inputTensorIndexes[viewIndex]});
        }

        // add permute layer to deswizzle the output
        IConnectableLayer* const deswizzleLayer = DeswizzleOut(*m_Network, layer, 0, outputTensorInfo);

        // add fused activation layer after the trailing swizzle layer
        layer = AddFusedActivationLayer(deswizzleLayer, 0, options->fused_activation_function);
    }
    else
    {
        // set the layer output tensor info
        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

        // register the input connection slots for the layer, connections are made after all layers have been created
        // only the tensors for the inputs are relevant, exclude the const tensors
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
    }

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1455
// Handler for the FULLY_CONNECTED operator. Input 1 holds the (2D) weights and
// an optional input 2 holds the bias; both are captured as const tensors.
// TfLite stores weights transposed relative to ArmNN, hence
// m_TransposeWeightMatrix. A fused activation may follow.
void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = false;
    // TfLite weights are [outputs, inputs]; ArmNN multiplies the transpose.
    desc.m_TransposeWeightMatrix = true;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Fully Connected Layer accepts two dimensional weights input
    int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
    if (weightsDimension != 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for Fully Connected weights is not supported by Armnn. "
                    "Node %2%")
                % weightsDimension
                % CHECK_LOCATION().AsString()));
    }

    // No permutation is applied to the weights (empty Optional).
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);

    // A third input, when present, is the bias tensor.
    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddFullyConnectedLayer(desc,
                                                  filterTensorAndData.first,
                                                  biasTensorAndData.first,
                                                  layerName.c_str());
    }
    else
    {
        layer = m_Network->AddFullyConnectedLayer(desc,
                                                  filterTensorAndData.first,
                                                  layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slot for the layer
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // we need to add the activation layer and fortunately we don't need to care about the data layout
    armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
                                                                             options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
}
1529
Sadik Armagan58f39192018-09-17 14:14:39 +01001530armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
1531 unsigned int outputSlot,
1532 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01001533{
1534 ActivationDescriptor activationDesc;
1535 std::string layerName = prevLayer->GetName();
1536
1537 switch(activationType)
1538 {
1539 case tflite::ActivationFunctionType_NONE:
1540 {
1541 // this is a no-op: return previous layer
1542 return prevLayer;
1543 }
1544 case tflite::ActivationFunctionType_RELU:
1545 {
1546 activationDesc.m_Function = ActivationFunction::ReLu;
1547 layerName += ":RELU";
1548 break;
1549 }
1550 case tflite::ActivationFunctionType_RELU6:
1551 {
1552 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1553 activationDesc.m_A = 6.0f;
1554 activationDesc.m_B = 0.0f;
1555 layerName += ":RELU6";
1556 break;
1557 }
1558 case tflite::ActivationFunctionType_TANH:
1559 {
1560 activationDesc.m_Function = ActivationFunction::TanH;
1561 activationDesc.m_A = 1.0f;
1562 activationDesc.m_B = 1.0f;
1563 layerName += ":TANH";
1564 break;
1565 }
1566
1567 // I only put these here as a reminder what others we could support
1568 case tflite::ActivationFunctionType_RELU_N1_TO_1:
1569 case tflite::ActivationFunctionType_SIGN_BIT:
1570 default:
1571 {
1572 throw ParseException(
1573 boost::str(
1574 boost::format("TfLite parser doesn't suppport fused activation: "
1575 "%1%/%2% %3% ") %
1576 activationType %
1577 tflite::EnumNameActivationFunctionType(activationType) %
1578 CHECK_LOCATION().AsString()));
1579
1580 }
1581 }
1582
1583 IConnectableLayer* activationLayer =
1584 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1585
1586 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
1587 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
1588 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
1589 return activationLayer;
1590}
1591
1592TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
1593{
1594 if (fileName == nullptr)
1595 {
1596 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
1597 CHECK_LOCATION().AsString()));
1598 }
1599 boost::system::error_code errorCode;
1600 boost::filesystem::path pathToFile(fileName);
1601 if (!boost::filesystem::exists(pathToFile, errorCode))
1602 {
1603 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
1604 fileName %
1605 errorCode %
1606 CHECK_LOCATION().AsString()));
1607 }
1608 std::ifstream file(fileName, std::ios::binary);
1609 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
1610 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
1611 fileContent.size());
1612}
1613
1614TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
1615{
1616 if (binaryContent == nullptr)
1617 {
1618 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
1619 CHECK_LOCATION().AsString()));
1620 }
1621 flatbuffers::Verifier verifier(binaryContent, len);
1622 if (verifier.VerifyBuffer<tflite::Model>() == false)
1623 {
1624 throw ParseException(
1625 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
1626 "flatbuffers format. size:%1% %2%") %
1627 len %
1628 CHECK_LOCATION().AsString()));
1629 }
1630 return tflite::UnPackModel(binaryContent);
1631}
1632
1633TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
1634 size_t subgraphIndex,
1635 size_t operatorIndex)
1636{
1637 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1638
1639 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1640 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1641
1642 size_t inputCount = operatorPtr->inputs.size();
1643 TensorRawPtrVector result(inputCount);
1644 for (size_t i=0; i<inputCount; ++i)
1645 {
1646 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
1647 result[i] = subGraphPtr->tensors[inputId].get();
1648 }
1649 return result;
1650}
1651
1652TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
1653 size_t subgraphIndex,
1654 size_t operatorIndex)
1655{
1656 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1657
1658 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1659 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1660
1661 size_t outputCount = operatorPtr->outputs.size();
1662 TensorRawPtrVector result(outputCount);
1663 for (size_t i=0; i<outputCount; ++i)
1664 {
1665 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
1666 CHECK_TENSOR(model, subgraphIndex, outputId);
1667 result[i] = subGraphPtr->tensors[outputId].get();
1668 }
1669 return result;
1670}
1671
1672TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
1673 size_t subgraphIndex)
1674{
1675 CHECK_SUBGRAPH(model, subgraphIndex);
1676 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1677
1678 size_t inputCount = subGraphPtr->inputs.size();
1679 TensorIdRawPtrVector result(inputCount);
1680 for (size_t i=0; i<inputCount; ++i)
1681 {
1682 uint32_t inputId = CHECKED_NON_NEGATIVE(subGraphPtr->inputs[i]);
1683 CHECK_TENSOR(model, subgraphIndex, inputId);
1684 result[i] = std::make_pair(inputId, subGraphPtr->tensors[inputId].get());
1685 }
1686 return result;
1687}
1688
1689TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
1690 size_t subgraphIndex)
1691{
1692 CHECK_SUBGRAPH(model, subgraphIndex);
1693 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1694
1695 size_t outputCount = subGraphPtr->outputs.size();
1696 TensorIdRawPtrVector result(outputCount);
1697 for (size_t i=0; i<outputCount; ++i)
1698 {
1699 uint32_t outputId = CHECKED_NON_NEGATIVE(subGraphPtr->outputs[i]);
1700 result[i] = std::make_pair(outputId, subGraphPtr->tensors[outputId].get());
1701 }
1702 return result;
1703}
1704
1705std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
1706 size_t subgraphIndex,
1707 size_t operatorIndex)
1708{
1709 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1710 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1711 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1712 return operatorPtr->inputs;
1713}
1714
1715std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
1716 size_t subgraphIndex,
1717 size_t operatorIndex)
1718{
1719 CHECK_MODEL(model, subgraphIndex, operatorIndex);
1720 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
1721 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
1722 return operatorPtr->outputs;
1723}
1724
1725void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
1726 size_t operatorIndex,
1727 IConnectableLayer* layer,
1728 const std::vector<unsigned int>& tensorIndexes)
1729{
1730 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1731 BOOST_ASSERT(layer != nullptr);
1732 if (tensorIndexes.size() != layer->GetNumInputSlots())
1733 {
1734 throw ParseException(
1735 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
1736 " for subgraph:%3% operator index:%4% %5%") %
1737 tensorIndexes.size() %
1738 layer->GetNumInputSlots() %
1739 subgraphIndex %
1740 operatorIndex %
1741 CHECK_LOCATION().AsString()));
1742 }
1743
1744 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
1745 {
1746 unsigned int tensorIndex = tensorIndexes[slotIndex];
1747 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
1748 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
1749 }
1750}
1751
1752void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
1753 size_t operatorIndex,
1754 IConnectableLayer* layer,
1755 const std::vector<unsigned int>& tensorIndexes)
1756{
1757 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1758 BOOST_ASSERT(layer != nullptr);
1759 if (tensorIndexes.size() != layer->GetNumOutputSlots())
1760 {
1761 throw ParseException(
1762 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
1763 " for subgraph:%3% operator index:%4% %5%") %
1764 tensorIndexes.size() %
1765 layer->GetNumOutputSlots() %
1766 subgraphIndex %
1767 operatorIndex %
1768 CHECK_LOCATION().AsString()));
1769 }
1770
1771 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1772 {
1773 unsigned int tensorIndex = tensorIndexes[slotIndex];
1774 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
1775 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
1776 }
1777}
1778
1779void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
1780{
1781 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1782
1783 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
1784 for (auto const & tensorIdAndPtr : inputs)
1785 {
1786 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1787 IConnectableLayer* layer =
1788 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1789
1790 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
1791 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1792
1793 RegisterOutputSlots(subgraphIndex,
1794 VIRTUAL_OPERATOR_ID,
1795 layer,
1796 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1797 }
1798}
1799
1800void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
1801{
1802 CHECK_SUBGRAPH(m_Model, subgraphIndex);
1803
1804 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
1805 for (auto const & tensorIdAndPtr : outputs)
1806 {
1807 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
1808 IConnectableLayer* layer =
1809 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
1810
1811 RegisterInputSlots(subgraphIndex,
1812 VIRTUAL_OPERATOR_ID,
1813 layer,
1814 { static_cast<uint32_t>(tensorIdAndPtr.first) });
1815 }
1816}
1817
// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
// Returns the raw flatbuffer buffer at bufferIndex; CHECK_BUFFER throws when
// the index is out of range for the model's buffer table.
TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
{
    CHECK_BUFFER(model, bufferIndex);
    return model->buffers[bufferIndex].get();
}
1824
Matteo Martincigh747ef822018-12-18 09:26:39 +00001825template<typename T>
1826std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
1827TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
1828 TfLiteParser::TensorRawPtr tensorPtr,
1829 armnn::TensorInfo& tensorInfo,
1830 armnn::Optional<armnn::PermutationVector&> permutationVector)
1831{
1832 auto constData = CreateConstTensorImpl<T>(bufferPtr,
1833 tensorPtr,
1834 tensorInfo,
1835 permutationVector);
1836 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
1837 return std::make_pair(constData.first, std::move(storage));
1838}
1839
telsoa01c577f2c2018-08-31 09:22:23 +01001840std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
1841TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00001842 armnn::TensorInfo& tensorInfo,
1843 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01001844{
1845 CHECK_TENSOR_PTR(tensorPtr);
1846 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
1847 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
1848
1849 switch (tensorInfo.GetDataType())
1850 {
1851 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001852 return CreateConstTensorAndStoreData<float>(bufferPtr,
1853 tensorPtr,
1854 tensorInfo,
1855 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001856 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001857 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
1858 tensorPtr,
1859 tensorInfo,
1860 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001861 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00001862 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
1863 tensorPtr,
1864 tensorInfo,
1865 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01001866 default:
1867 {
1868 std::stringstream errString;
1869 errString << "Unexpected datatype when creating const tensor: "
1870 << armnn::GetDataTypeName(tensorInfo.GetDataType())
1871 << " shape:" << tensorInfo.GetShape()
1872 << CHECK_LOCATION().AsString();
1873 throw ParseException(errString.str());
1874 }
1875 }
1876}
1877
1878BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
1879 const std::string& name) const
1880{
1881 CHECK_SUBGRAPH(m_Model, subgraphId);
1882 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
1883 for (auto const & input : inputs)
1884 {
1885 if (input.second->name == name)
1886 {
1887 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
1888 return std::make_pair(bindingId, ToTensorInfo(input.second));
1889 }
1890 }
1891
1892 std::stringstream bindings;
1893 for (auto const & input : inputs)
1894 {
1895 bindings << "'" << input.second->name << "' ";
1896 }
1897
1898 throw ParseException(
1899 boost::str(
1900 boost::format("No input binding found for subgraph:%1% and name:%2%. "
1901 "Possible inputs are: [%3%] %4%") %
1902 subgraphId %
1903 name %
1904 bindings.str() %
1905 CHECK_LOCATION().AsString()));
1906}
1907
1908BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
1909 const std::string& name) const
1910{
1911 CHECK_SUBGRAPH(m_Model, subgraphId);
1912 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
1913 for (auto const & output : outputs)
1914 {
1915 if (output.second->name == name)
1916 {
1917 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
1918 return std::make_pair(bindingId, ToTensorInfo(output.second));
1919 }
1920 }
1921
1922 std::stringstream bindings;
1923 for (auto const & output : outputs)
1924 {
1925 bindings << "'" << output.second->name << "' ";
1926 }
1927
1928 throw ParseException(
1929 boost::str(
1930 boost::format("No output binding found for subgraph:%1% and name:%2%. "
1931 "Possible outputs are: [%3%] %4%") %
1932 subgraphId %
1933 name %
1934 bindings.str() %
1935 CHECK_LOCATION().AsString()));
1936}
1937
// Number of subgraphs contained in the loaded model.
size_t TfLiteParser::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}
1942
1943std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
1944{
1945 CHECK_SUBGRAPH(m_Model, subgraphId);
1946 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
1947 std::vector<std::string> result;
1948 result.reserve(inputs.size());
1949 for (auto const & input : inputs)
1950 {
1951 result.push_back(input.second->name);
1952 }
1953 return result;
1954}
1955
1956std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
1957{
1958 CHECK_SUBGRAPH(m_Model, subgraphId);
1959 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
1960 std::vector<std::string> result;
1961 result.reserve(outputs.size());
1962 for (auto const & output : outputs)
1963 {
1964 result.push_back(output.second->name);
1965 }
1966 return result;
1967}
1968
// Factory returning a raw pointer; the caller owns it and must release it via
// ITfLiteParser::Destroy.
ITfLiteParser* ITfLiteParser::CreateRaw()
{
    return new TfLiteParser();
}
1973
// Factory returning a smart pointer whose custom deleter routes destruction
// through ITfLiteParser::Destroy.
ITfLiteParserPtr ITfLiteParser::Create()
{
    return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
}
1978
// Matching release function for parsers obtained from CreateRaw/Create.
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
1983
// Takes ownership of float constant data; the other storage slots stay empty.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int32Data(nullptr)
{
}
1990
// Takes ownership of uint8 constant data; the other storage slots stay empty.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int32Data(nullptr)
{
}
1997
// Takes ownership of int32 constant data; the other storage slots stay empty.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
2004
2005} // armnnTfLiteParser