//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TfLiteParser.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>

// armnnUtils:
#include <ParserHelper.hpp>
#include <Permute.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/cast.hpp>

#include <fstream>
#include <algorithm>
#include <limits>
#include <numeric>
#include <flatbuffers/flexbuffers.h>

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{
namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParser::ModelPtr & model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid operator index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // The model is not checked here because CHECK_MODEL is assumed to have run already
    // and validated it. An assert is sufficient.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // Likewise, the subgraph index should already have been checked by CHECK_MODEL,
    // so only an assert is added here.
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // The tensor index is the only thing left to check here.
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                tensorIndex %
                location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
                    const CheckLocation & location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with a null tensor pointer. "
                              "at %2%") %
                location.m_Function %
                location.FileLine()));
    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParser::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "buffer:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid buffer index. "
                              "buffer index:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("The buffer #%1% is null. %2%") %
                bufferIndex %
                location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                bufferId %
                location.AsString()));
    }
    else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
            tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

uint32_t CheckDilation(const int32_t dilationFactor,
                       size_t operatorIndex,
                       const CheckLocation& location)
{
    if (dilationFactor != 1)
    {
        std::stringstream ss;
        ss << "ArmNN only supports convolution layers with dilations [1,1,1,1] for operator with index "
           << operatorIndex << location.AsString();
        throw ParseException(ss.str());
    }

    return static_cast<uint32_t>(dilationFactor);
}

#define CHECK_DILATION(DILATION_FACTOR, OPERATOR_INDEX) \
    CheckDilation(DILATION_FACTOR, OPERATOR_INDEX, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)


std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto & i : in)
    {
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}

void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t temp = (outputSize - 1) * stride + filterSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}
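// Worked example of the SAME-padding computation above: with inputSize = 5, filterSize = 3
// and stride = 2, outputSize = (5 + 2 - 1) / 2 = 3 and temp = (3 - 1) * 2 + 3 = 7, so the
// total padding is 7 - 5 = 2, split as paddingFront = 1 and paddingBack = 1.
// For VALID padding both values stay at 0.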

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                    tensorPtr->type %
                    tflite::EnumNameTensorType(tensorPtr->type) %
                    tensorPtr->name %
                    location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32
            // but this is what we support at the moment in ArmNN
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    std::vector<unsigned int> safeShape = shapes;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
    }

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(safeShape.size()),
                             safeShape.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}
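// Note on the safeShape fallback above: a TfLite scalar (rank-0 tensor) arrives here with
// an empty shape vector, and is represented on the ArmNN side as a one-element tensor of
// shape {1}, with the data type and quantisation parameters carried over unchanged.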

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions);
}

template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // generate the binding id by shifting the tensor id left by 8 bits
    // and adding the subgraph id, which allows up to 256 subgraphs
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}
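// For example, tensor 5 in subgraph 0 gets binding id (5 << 8) + 0 = 1280, and the same
// tensor index in subgraph 1 would get 1281; the subgraph can be recovered as id % 256
// and the tensor index as id / 256.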

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

} // <anonymous>

TfLiteParser::TfLiteParser()
: m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]   = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]     = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]           = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]            = &TfLiteParser::ParseDetectionPostProcess;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]   = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]          = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]       = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]           = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]           = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]              = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]             = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]           = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]   = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]     = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]               = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_ADD]               = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]               = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]              = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]               = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]             = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]              = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]            = &TfLiteParser::ParseUnpack;
}

void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}

void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subGraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subGraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subGraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
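// For example, adding a [1, 2, 2, 3] tensor to a [3] tensor: the lower-rank input is
// re-described as [1, 1, 1, 3] by the Reshape layer inserted above, so that both inputs
// of the elementwise layer have the same number of dimensions before broadcasting.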

INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
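// Typical client-side usage (a minimal sketch, assuming the public ITfLiteParser factory
// and the usual Runtime/Optimize flow from armnn/ArmNN.hpp; the model file name and the
// chosen backend are illustrative only):
//
//     auto parser = armnnTfLiteParser::ITfLiteParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*network, {armnn::Compute::CpuRef}, runtime->GetDeviceSpec());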

INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubGraphPtr const & subgraph : m_Model->subgraphs)
    {
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                            builtinCode %
                            tflite::BuiltinOperator_MAX %
                            subgraphIndex %
                            operatorIndex %
                            CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
            boost::format("Another layer has already registered itself as the producer of "
                          "subgraph:%1% tensor:%2% %3%") %
            subgraphIndex %
            tensorIndex %
            CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    throw ParseException(
        boost::str(
            boost::format("Operator not supported. "
                          "subgraph:%1% operator:%2% "
                          "opcode_index:%3% opcode:%4% / %5% %6%") %
            subgraphIndex %
            operatorIndex %
            opcodeIndex %
            opcode %
            tflite::EnumNameBuiltinOperator(opcode) %
            CHECK_LOCATION().AsString()));
}

void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    CHECK_DILATION(options->dilation_h_factor, operatorIndex);
    CHECK_DILATION(options->dilation_w_factor, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 biasTensorAndData.first,
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
    CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    CHECK_DILATION(options->dilation_h_factor, operatorIndex);
    CHECK_DILATION(options->dilation_w_factor, operatorIndex);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasTensorAndData.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
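// Example of the weight handling above: for an input of shape [N, H, W, 3] and a TfLite
// weight tensor of shape [1, 3, 3, 3] (I * M = 3, so M = 1), the weights are re-described
// as [3, 3, 3, 1] ([H, W, I, M]) and then permuted with {2, 3, 1, 0} into the [M, I, H, W]
// layout that the ArmNN depthwise convolution expects.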

void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}

void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }

    armnn::BatchToSpaceNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_Crops = crops;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}

void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();

    SoftmaxDescriptor desc;
    desc.m_Beta = options->beta;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
                                                     const armnn::TensorInfo & inputTensorInfo)
{
    CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
    std::vector<uint32_t> squeezeDims = squeezeDimsIn;
    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        std::stringstream ss;
        ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
           << " shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    if (squeezeDims.empty())
    {
        squeezeDims.assign(dimensionSequence,
                           dimensionSequence+inputTensorInfo.GetNumDimensions());
    }

    std::vector<uint32_t> outputDims;
    for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
    {
        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
        auto currentDimension = inputTensorInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    if (outputDims.size() > 4)
    {
        std::stringstream ss;
        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
           << " input shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    // we need to preserve the tensor type and the quantization data as well
    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);

    return outTensorInfo;
}
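// For example, with an input of shape [1, 2, 1, 3] and an empty squeeze_dims list every
// dimension is considered, so both size-1 dimensions are dropped and the output shape is
// [2, 3]; with squeeze_dims = {0} only the leading dimension is removed, giving [2, 1, 3].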

void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo outputTensorInfo =
        TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
                                           inputTensorInfo);

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();

    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSubOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsAddOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1365
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001366void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1367{
1368 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1369
1370 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1371 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1372
1373 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1374 CHECK_VALID_SIZE(inputs.size(), 2);
1375
1376 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1377 CHECK_VALID_SIZE(outputs.size(), 1);
1378
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001379 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1380 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1381
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001382 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1383 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1384
1385 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1386 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1387
1388 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001389 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1390 {
1391 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1392 }
1393 else
1394 {
1395 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1396 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001397
1398 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1399
1400 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1401 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1402}
1403
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001404void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1405{
1406 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1407
1408 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1409
1410 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1411 CHECK_VALID_SIZE(outputs.size(), 1);
1412
1413 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1414 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1415
1416 armnn::MeanDescriptor desc;
1417 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1418 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1419 desc.m_Axis = axis;
1420
1421 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1422 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1423
1424 desc.m_KeepDims =
1425 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1426 true : false;
1427
1428 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1429 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1430
1431 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1432
1433 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1434 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1435
1436 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1437 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1438}
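// A minimal worked sketch of the descriptor built above (the shapes and axis values are
// illustrative, not taken from any particular model): for an NHWC input of shape {1,2,2,3},
// an axis tensor holding {1,2} and an output of shape {1,1,1,3}, the code yields
//     desc.m_Axis     = {1, 2};  // reduce over H and W
//     desc.m_KeepDims = true;    // input rank (4) matches output rank (4)
// whereas an output of shape {1,3} would give desc.m_KeepDims = false.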
1439
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001440void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1441{
1442 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1443
1444 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1445
1446 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1447 CHECK_VALID_SIZE(outputs.size(), 1);
1448
1449 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1450 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1451
1452 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1453 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1454
1455 size_t step = 2;
1456 armnn::PadDescriptor desc;
1457 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1458 {
1459 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1460 }
1461
1462 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1463 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1464
1465 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1466 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1467
1468 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1469 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1470
1471 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1472 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1473}
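// A worked sketch of the pad-list construction above (illustrative values): the pad tensor is
// flattened row-major as (before, after) pairs per dimension, so a rank-4 input with a pad buffer
// of {0,0, 1,1, 2,2, 0,0} produces
//     desc.m_PadList = { {0,0}, {1,1}, {2,2}, {0,0} };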
1474
Finn Williamsc42c3842019-01-22 14:18:11 +00001475
Sadik Armagan58f39192018-09-17 14:14:39 +01001476void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1477{
Finn Williamsc42c3842019-01-22 14:18:11 +00001478 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001479}
1480
1481void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1482{
Finn Williamsc42c3842019-01-22 14:18:11 +00001483 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::BoundedReLu);
1484}
Sadik Armagan58f39192018-09-17 14:14:39 +01001485
Finn Williamsc42c3842019-01-22 14:18:11 +00001486void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1487{
1488 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Sigmoid);
1489}
1490
Nina Drozd99851762019-04-09 09:37:38 +01001491void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1492{
1493 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::TanH);
1494}
1495
Finn Williamsc42c3842019-01-22 14:18:11 +00001496
1497void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1498{
1499 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001500 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1501 boost::ignore_unused(operatorPtr);
1502
1503 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1504 CHECK_VALID_SIZE(inputs.size(), 1);
1505
1506 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1507 CHECK_VALID_SIZE(outputs.size(), 1);
1508
Finn Williamsc42c3842019-01-22 14:18:11 +00001509 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001510 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001511 activationDesc.m_Function = activationType;
1512
1513 switch (activationType)
1514 {
1515 case ActivationFunction::ReLu:
1516 {
1517 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1518 break;
1519 }
1520 case ActivationFunction::BoundedReLu:
1521 {
1522 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1523 activationDesc.m_A = 6.0f;
1524 activationDesc.m_B = 0.0f;
1525 break;
1526 }
1527 case ActivationFunction::Sigmoid:
1528 {
1529 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1530 break;
1531 }
Nina Drozd99851762019-04-09 09:37:38 +01001532 case ActivationFunction::TanH:
1533 {
1534 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1535 activationDesc.m_A = 1.0f;
1536 activationDesc.m_B = 1.0f;
1537 break;
1538 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001539 default:
1540 {
1541 throw ParseException(
1542 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1543 " %2%") % static_cast<int>(activationType) % CHECK_LOCATION().AsString()));
1544 }
1545 }
1546
1547 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001548
1549 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1550 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1551
1552 // register the input connection slots for the layer, connections are made after all layers have been created
1553 // only the tensors for the inputs are relevant, exclude the const tensors
1554 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1555 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1556
1557 // register the output connection slots for the layer, connections are made after all layers have been created
1558 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1559 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1560}
Sadikb94967b2018-09-19 15:30:00 +01001561armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1562 const std::vector<int32_t> & targetDimsIn)
1563{
1564 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1565 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1566
1567 if (stretchDim != targetDimsIn.end())
1568 {
1569 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1570 {
1571 throw ParseException(
1572 boost::str(
1573 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1574 }
1575
1576 auto targetNumElements =
1577 boost::numeric_cast<unsigned int>(
1578 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1579
1580 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1581 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1582 }
1583
1584 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1585
1586 TensorInfo reshapeInfo = inputTensorInfo;
1587 reshapeInfo.SetShape(outputShape);
1588
1589 return reshapeInfo;
1590}
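// A worked sketch of the stretch-dimension (-1) handling above, with illustrative values:
//     inputTensorInfo shape = {2, 3, 4}           // 24 elements
//     targetDimsIn          = {-1, 6}
//     targetNumElements     = (-1) * (-1) * 6 = 6 // the initial -1 cancels the stretch marker
//     outputDims            = {24 / 6, 6} = {4, 6}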
1591
1592void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1593{
1594 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1595
1596 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001597
1598 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1599 CHECK_VALID_SIZE(outputs.size(), 1);
1600
1601 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1602 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1603
1604 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001605 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1606 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001607 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1608
kevmay0171972a82018-12-17 14:28:03 +00001609 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001610 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1611 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001612 {
1613 std::stringstream ss;
1614 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001615 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001616 << " does not equal output shape "
1617 << actualOutputTensorInfo.GetShape()
1618 << ": "
1619 << CHECK_LOCATION().AsString();
1620 throw ParseException(ss.str());
1621 }
1622
Sadikb94967b2018-09-19 15:30:00 +01001623 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001624 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001625
1626 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1627 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001628 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001629
1630 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1631 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1632
1633 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1634 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1635}
1636
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001637void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1638{
1639 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1640
1641 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1642 CHECK_VALID_SIZE(inputs.size(), 2);
1643
1644 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1645 CHECK_VALID_SIZE(outputs.size(), 1);
1646
1647 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1648
1649 // Data for the parsed tensor args (size) must be stored locally.
1650 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1651
1652 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1653 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1654
1655 ResizeBilinearDescriptor desc;
1656 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
1657 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1658 desc.m_DataLayout = armnn::DataLayout::NHWC;
1659
1660 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
1661 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
1662
1663 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1664 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1665
1666 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1667 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1668
1669 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1670 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1671}
1672
Sadik Armagan479045b2018-10-01 11:51:37 +01001673void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1674{
1675 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1676
1677 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1678 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1679
1680 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1681
1682 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1683 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1684 CHECK_VALID_SIZE(outputs.size(), 1);
1685
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001686 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1687 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001688
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001689 const unsigned int concatDimInput = static_cast<unsigned int>(
1690 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01001691
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001692 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1693 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001694
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001695 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001696
1697 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1698 {
1699 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1700
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001701 // This sets up the concatDescriptor view origins
1702 armnnUtils::ProcessConcatInputTensorInfo(
1703 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001704 }
1705
1706 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
1707 IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());
1708
1709 BOOST_ASSERT(layer != nullptr);
1710
1711 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1712 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001713
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001714 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001715
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001716 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001717
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001718 // add fused activation layer
1719 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001720
Sadik Armagan479045b2018-10-01 11:51:37 +01001721 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1722 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1723}
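// A sketch of the axis normalisation above (illustrative values): for rank-4 inputs a TfLite axis
// of -1 maps to the last dimension, concatDimInput = (4 + (-1)) % 4 = 3, so two NHWC inputs of
// shapes {1,2,2,3} and {1,2,2,5} are concatenated along the channel dimension into {1,2,2,8}.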
1724
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001725void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1726{
1727 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1728
1729 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1730 const auto options = operatorPtr->builtin_options.AsFullyConnectedOptions();
1731
1732 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1733
1734 FullyConnectedDescriptor desc;
1735 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001736 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001737
1738 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1739 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1740 CHECK_VALID_SIZE(outputs.size(), 1);
1741
1742 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1743
1744 // The Fully Connected layer accepts two-dimensional weights input
1745 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1746 if (weightsDimension != 2)
1747 {
1748 throw ParseException(
1749 boost::str(
1750 boost::format(
1751 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1752 "Node %2%")
1753 % weightsDimension
1754 % CHECK_LOCATION().AsString()));
1755 }
1756
Matteo Martincigh747ef822018-12-18 09:26:39 +00001757 auto filterTensorAndData = CreateConstTensor(inputs[1],
1758 filterTensorInfo,
1759 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001760 armnn::IConnectableLayer* layer;
1761 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1762
1763 if (inputs.size() == 3)
1764 {
1765 desc.m_BiasEnabled = true;
1766 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001767 auto biasTensorAndData = CreateConstTensor(inputs[2],
1768 biasTensorInfo,
1769 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001770 layer = m_Network->AddFullyConnectedLayer(desc,
1771 filterTensorAndData.first,
1772 biasTensorAndData.first,
1773 layerName.c_str());
1774 }
1775 else
1776 {
1777 layer = m_Network->AddFullyConnectedLayer(desc,
1778 filterTensorAndData.first,
1779 layerName.c_str());
1780 }
1781 BOOST_ASSERT(layer != nullptr);
1782
1783 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1784 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1785
1786 // register the input connection slot for the layer
1787 // only the tensors for the inputs are relevant, exclude the const tensors
1788 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1789 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1790
1791 // we need to add the activation layer and fortunately we don't need to care about the data layout
1792 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1793 options->fused_activation_function);
1794 // register the output connection slots for the layer, connections are made after all layers have been created
1795 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1796 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1797}
1798
keidav011b3e2ea2019-02-21 10:07:37 +00001799void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1800{
1801 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1802
1803 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1804
1805 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1806 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1807 CHECK_VALID_SIZE(outputs.size(), 4);
1808
1809 // Obtain custom options from flexbuffers
1810 auto custom_options = operatorPtr->custom_options;
1811 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1812
1813 // Obtain descriptor information from tf lite
1814 DetectionPostProcessDescriptor desc;
1815 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1816 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1817 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1818 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1819 desc.m_NumClasses = m["num_classes"].AsUInt32();
1820 desc.m_ScaleH = m["h_scale"].AsFloat();
1821 desc.m_ScaleW = m["w_scale"].AsFloat();
1822 desc.m_ScaleX = m["x_scale"].AsFloat();
1823 desc.m_ScaleY = m["y_scale"].AsFloat();
1824
keidav0107d58c72019-02-26 11:57:39 +00001825 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00001826 {
keidav0107d58c72019-02-26 11:57:39 +00001827 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00001828 }
1829 if (!(m["detections_per_class"].IsNull()))
1830 {
1831 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1832 }
1833
1834 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1835 {
1836 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1837 "must be positive and less than or equal to 1.");
1838 }
1839
1840 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
1841 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
1842 armnn::Optional<armnn::PermutationVector&>());
1843
1844 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
1845 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
1846 layerName.c_str());
1847
1848 BOOST_ASSERT(layer != nullptr);
1849
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00001850 // The model does not specify the output shapes.
1851 // The output shapes are calculated from the max_detections and max_classes_per_detection attributes.
1852 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
1853 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
1854 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
1855 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
1856 m_OverridenOutputShapes.push_back({ 1 });
1857
keidav011b3e2ea2019-02-21 10:07:37 +00001858 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
1859 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00001860 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00001861 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
1862 }
1863
1864 // Register the input connection slots for the layer, connections are made after all layers have been created
1865 // only the tensors for the inputs are relevant, exclude the const tensors
1866 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1867 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1868
1869 // Register the output connection slots for the layer, connections are made after all layers have been created
1870 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1871 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
1872 outputTensorIndexes[1],
1873 outputTensorIndexes[2],
1874 outputTensorIndexes[3]});
1875}
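// A sketch of the overridden output shapes computed above (illustrative option values):
// max_detections = 10 and max_classes_per_detection = 1 give numDetectedBox = 10, so the four
// outputs are assumed to be registered as
//     {1, 10, 4}  // detection boxes
//     {1, 10}     // detection classes
//     {1, 10}     // detection scores
//     {1}         // number of valid detections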
1876
Nina Drozd200e3802019-04-15 09:47:39 +01001877void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
1878{
1879 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1880
1881 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1882 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
1883
1884 // unpackAxis indicates the axis along which to unpack
1885 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
1886
1887 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1888 CHECK_VALID_SIZE(inputs.size(), 1);
1889
1890 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1891 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
1892 // If num is not defined, automatically infer from the length of the dimension axis.
1893 if(unpackNum == 0)
1894 {
1895 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
1896 }
1897
1898 // If unpack number cannot be inferred and is still zero, throw ParseException.
1899 if(unpackNum == 0)
1900 {
1901 throw ParseException("Number to unpack must be greater than zero.");
1902 }
1903
1904 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1905 CHECK_VALID_SIZE(outputs.size(), unpackNum);
1906
1907 auto inputDimSize = inputTensorInfo.GetNumDimensions();
1908 std::vector<unsigned int> unpackDimSizes(inputDimSize);
1909
1910 // Add current input shape to unpackDimSizes
1911 for (unsigned int i = 0; i < inputDimSize; ++i)
1912 {
1913 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
1914 }
1915
1916 if (unpackDimSizes[unpackAxis] != unpackNum)
1917 {
1918 throw ParseException("Number to unpack must be the same as the length of the dimension to "
1919 "unpack along.");
1920 }
1921
1922 unpackDimSizes[unpackAxis] /= unpackNum;
1923
1924 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
1925 for (unsigned int j = 0; j < unpackNum; ++j)
1926 {
1927 // Set the size of the views.
1928 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
1929 {
1930 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
1931 }
1932 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
1933 }
1934
1935 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
1936 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
1937
1938 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1939 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1940
1941 TensorShape outShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
1942 unpackDimSizes.data());
1943
1944 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
1945 {
1946 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
1947 inputTensorInfo.GetDataType()));
1948 }
1949
1950 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1951 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1952}
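// A worked sketch of the splitter configuration above (illustrative values): an input of shape
// {4,3,2} with unpackAxis = 1 and num = 3 produces three views, each of size {4,1,2}, with view
// origins (0,0,0), (0,1,0) and (0,2,0); every output slot is then given the shape {4,1,2}.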
1953
Nina Drozd0324f482019-04-08 10:52:10 +01001954void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
1955{
1956 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1957
1958 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1959 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
1960
1961 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
1962
Nina Drozd200e3802019-04-15 09:47:39 +01001963 // If number of splits cannot be inferred and is zero, throw ParseException.
1964 if(numSplits == 0)
1965 {
1966 throw ParseException("Number of splits must be greater than zero.");
1967 }
1968
Nina Drozd0324f482019-04-08 10:52:10 +01001969 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1970 CHECK_VALID_SIZE(inputs.size(), 2);
1971 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1972 CHECK_VALID_SIZE(outputs.size(), numSplits);
1973
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01001974 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
1975 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01001976
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01001977 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
1978 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
1979 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
1980
1981 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
1982 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01001983
1984 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
1985 if (splitDim == 0 || splitDim == 2)
1986 {
1987 throw ParseException(
1988 boost::str(
1989 boost::format(
1990 "Dimension %1% for split is not supported by Armnn. %2%")
1991 % splitDim
1992 % CHECK_LOCATION().AsString()));
1993 }
1994
1995 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01001996 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01001997 {
1998 throw ParseException(
1999 boost::str(
2000 boost::format(
2001 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002002 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002003 % inputTensorInfo.GetNumDimensions()
2004 % MaxNumOfTensorDimensions
2005 % CHECK_LOCATION().AsString()));
2006 }
2007
2008 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2009
2010 // Add current input shape to splitterDimSizes
2011 for (unsigned int i = 0; i < inputDimSize; ++i)
2012 {
2013 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2014 }
2015
2016 if (splitterDimSizes[splitDim] % numSplits != 0)
2017 {
2018 throw ParseException("Number of splits must evenly divide the dimension");
2019 }
2020 splitterDimSizes[splitDim] /= numSplits;
2021
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002022 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002023 for (unsigned int j = 0; j < numSplits; ++j)
2024 {
2025 // Set the size of the views.
2026 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2027 {
2028 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2029 }
2030 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2031 }
2032
2033 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2034 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2035
2036 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002037 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002038
2039 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2040 splitterDimSizes.data());
2041
2042 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2043 {
2044 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
2045 inputTensorInfo.GetDataType()));
2046 }
2047
2048 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2049 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2050}
2051
Sadik Armagan58f39192018-09-17 14:14:39 +01002052armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2053 unsigned int outputSlot,
2054 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002055{
2056 ActivationDescriptor activationDesc;
2057 std::string layerName = prevLayer->GetName();
2058
2059 switch(activationType)
2060 {
2061 case tflite::ActivationFunctionType_NONE:
2062 {
2063 // this is a no-op: return previous layer
2064 return prevLayer;
2065 }
2066 case tflite::ActivationFunctionType_RELU:
2067 {
2068 activationDesc.m_Function = ActivationFunction::ReLu;
2069 layerName += ":RELU";
2070 break;
2071 }
2072 case tflite::ActivationFunctionType_RELU6:
2073 {
2074 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2075 activationDesc.m_A = 6.0f;
2076 activationDesc.m_B = 0.0f;
2077 layerName += ":RELU6";
2078 break;
2079 }
2080 case tflite::ActivationFunctionType_TANH:
2081 {
2082 activationDesc.m_Function = ActivationFunction::TanH;
2083 activationDesc.m_A = 1.0f;
2084 activationDesc.m_B = 1.0f;
2085 layerName += ":TANH";
2086 break;
2087 }
2088
2089 // These are listed here only as a reminder of the other fused activations we could support
2090 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2091 case tflite::ActivationFunctionType_SIGN_BIT:
2092 default:
2093 {
2094 throw ParseException(
2095 boost::str(
2096 boost::format("TfLite parser doesn't support fused activation: "
2097 "%1%/%2% %3% ") %
2098 activationType %
2099 tflite::EnumNameActivationFunctionType(activationType) %
2100 CHECK_LOCATION().AsString()));
2101
2102 }
2103 }
2104
2105 IConnectableLayer* activationLayer =
2106 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2107
2108 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2109 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2110 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2111 return activationLayer;
2112}
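// A minimal sketch of how the Parse* functions above use this helper (the layer name is
// illustrative):
//     IConnectableLayer* layer = m_Network->AddAdditionLayer("Add:0:0");
//     layer = AddFusedActivationLayer(layer, 0, tflite::ActivationFunctionType_RELU6);
//     // 'layer' is now the BoundedReLu activation (m_A = 6.0f) chained onto output slot 0 of the
//     // addition, or the addition layer itself when the fused activation is NONE.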
2113
2114TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2115{
2116 if (fileName == nullptr)
2117 {
2118 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2119 CHECK_LOCATION().AsString()));
2120 }
2121 boost::system::error_code errorCode;
2122 boost::filesystem::path pathToFile(fileName);
2123 if (!boost::filesystem::exists(pathToFile, errorCode))
2124 {
2125 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2126 fileName %
2127 errorCode %
2128 CHECK_LOCATION().AsString()));
2129 }
2130 std::ifstream file(fileName, std::ios::binary);
2131 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2132 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2133 fileContent.size());
2134}
2135
2136TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2137{
2138 if (binaryContent == nullptr)
2139 {
2140 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2141 CHECK_LOCATION().AsString()));
2142 }
2143 flatbuffers::Verifier verifier(binaryContent, len);
2144 if (verifier.VerifyBuffer<tflite::Model>() == false)
2145 {
2146 throw ParseException(
2147 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2148 "flatbuffers format. size:%1% %2%") %
2149 len %
2150 CHECK_LOCATION().AsString()));
2151 }
2152 return tflite::UnPackModel(binaryContent);
2153}
2154
2155TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2156 size_t subgraphIndex,
2157 size_t operatorIndex)
2158{
2159 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2160
2161 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
2162 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
2163
2164 size_t inputCount = operatorPtr->inputs.size();
2165 TensorRawPtrVector result(inputCount);
2166 for (size_t i=0; i<inputCount; ++i)
2167 {
2168 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
2169 result[i] = subGraphPtr->tensors[inputId].get();
2170 }
2171 return result;
2172}
2173
2174TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2175 size_t subgraphIndex,
2176 size_t operatorIndex)
2177{
2178 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2179
2180 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
2181 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
2182
2183 size_t outputCount = operatorPtr->outputs.size();
2184 TensorRawPtrVector result(outputCount);
2185 for (size_t i=0; i<outputCount; ++i)
2186 {
2187 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2188 CHECK_TENSOR(model, subgraphIndex, outputId);
2189 result[i] = subGraphPtr->tensors[outputId].get();
2190 }
2191 return result;
2192}
2193
2194TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2195 size_t subgraphIndex)
2196{
2197 CHECK_SUBGRAPH(model, subgraphIndex);
2198 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
2199
2200 size_t inputCount = subGraphPtr->inputs.size();
2201 TensorIdRawPtrVector result(inputCount);
2202 for (size_t i=0; i<inputCount; ++i)
2203 {
2204 uint32_t inputId = CHECKED_NON_NEGATIVE(subGraphPtr->inputs[i]);
2205 CHECK_TENSOR(model, subgraphIndex, inputId);
2206 result[i] = std::make_pair(inputId, subGraphPtr->tensors[inputId].get());
2207 }
2208 return result;
2209}
2210
2211TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2212 size_t subgraphIndex)
2213{
2214 CHECK_SUBGRAPH(model, subgraphIndex);
2215 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
2216
2217 size_t outputCount = subGraphPtr->outputs.size();
2218 TensorIdRawPtrVector result(outputCount);
2219 for (size_t i=0; i<outputCount; ++i)
2220 {
2221 uint32_t outputId = CHECKED_NON_NEGATIVE(subGraphPtr->outputs[i]);
2222 result[i] = std::make_pair(outputId, subGraphPtr->tensors[outputId].get());
2223 }
2224 return result;
2225}
2226
2227std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2228 size_t subgraphIndex,
2229 size_t operatorIndex)
2230{
2231 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2232 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
2233 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
2234 return operatorPtr->inputs;
2235}
2236
2237std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2238 size_t subgraphIndex,
2239 size_t operatorIndex)
2240{
2241 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2242 const auto & subGraphPtr = model->subgraphs[subgraphIndex];
2243 const auto & operatorPtr = subGraphPtr->operators[operatorIndex];
2244 return operatorPtr->outputs;
2245}
2246
2247void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2248 size_t operatorIndex,
2249 IConnectableLayer* layer,
2250 const std::vector<unsigned int>& tensorIndexes)
2251{
2252 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2253 BOOST_ASSERT(layer != nullptr);
2254 if (tensorIndexes.size() != layer->GetNumInputSlots())
2255 {
2256 throw ParseException(
2257 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2258 " for subgraph:%3% operator index:%4% %5%") %
2259 tensorIndexes.size() %
2260 layer->GetNumInputSlots() %
2261 subgraphIndex %
2262 operatorIndex %
2263 CHECK_LOCATION().AsString()));
2264 }
2265
2266 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2267 {
2268 unsigned int tensorIndex = tensorIndexes[slotIndex];
2269 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2270 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2271 }
2272}
2273
2274void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2275 size_t operatorIndex,
2276 IConnectableLayer* layer,
2277 const std::vector<unsigned int>& tensorIndexes)
2278{
2279 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2280 BOOST_ASSERT(layer != nullptr);
2281 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2282 {
2283 throw ParseException(
2284 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2285 " for subgraph:%3% operator index:%4% %5%") %
2286 tensorIndexes.size() %
2287 layer->GetNumOutputSlots() %
2288 subgraphIndex %
2289 operatorIndex %
2290 CHECK_LOCATION().AsString()));
2291 }
2292
2293 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2294 {
2295 unsigned int tensorIndex = tensorIndexes[slotIndex];
2296 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2297 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2298 }
2299}
2300
2301void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2302{
2303 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2304
2305 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2306 for (auto const & tensorIdAndPtr : inputs)
2307 {
2308 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2309 IConnectableLayer* layer =
2310 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2311
2312 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2313 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2314
2315 RegisterOutputSlots(subgraphIndex,
2316 VIRTUAL_OPERATOR_ID,
2317 layer,
2318 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2319 }
2320}
2321
2322void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2323{
2324 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2325
2326 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2327 for (auto const & tensorIdAndPtr : outputs)
2328 {
2329 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2330 IConnectableLayer* layer =
2331 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2332
2333 RegisterInputSlots(subgraphIndex,
2334 VIRTUAL_OPERATOR_ID,
2335 layer,
2336 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2337 }
2338}
2339
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002340void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2341{
2342 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2343
2344 const auto & subGraphPtr = m_Model->subgraphs[subgraphIndex];
2345 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2346 {
2347 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2348 {
2349 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2350 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2351 {
2352 TensorRawPtr tensorPtr = subGraphPtr->tensors[tensorIndex].get();
2353 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2354 auto tensorAndData = CreateConstTensor(tensorPtr,
2355 tensorInfo,
2356 armnn::Optional<armnn::PermutationVector&>());
2357
2358 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2359 IConnectableLayer *layer =
2360 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2361
2362 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2363 RegisterOutputSlots(subgraphIndex,
2364 VIRTUAL_OPERATOR_ID,
2365 layer,
2366 { tensorIndex });
2367
2368 }
2369 }
2370 }
2371}
2372
telsoa01c577f2c2018-08-31 09:22:23 +01002373// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2374TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2375{
2376 CHECK_BUFFER(model, bufferIndex);
2377 return model->buffers[bufferIndex].get();
2378}
2379
Matteo Martincigh747ef822018-12-18 09:26:39 +00002380template<typename T>
2381std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2382TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2383 TfLiteParser::TensorRawPtr tensorPtr,
2384 armnn::TensorInfo& tensorInfo,
2385 armnn::Optional<armnn::PermutationVector&> permutationVector)
2386{
2387 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2388 tensorPtr,
2389 tensorInfo,
2390 permutationVector);
2391 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2392 return std::make_pair(constData.first, std::move(storage));
2393}
2394
telsoa01c577f2c2018-08-31 09:22:23 +01002395std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2396TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002397 armnn::TensorInfo& tensorInfo,
2398 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002399{
2400 CHECK_TENSOR_PTR(tensorPtr);
2401 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2402 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2403
2404 switch (tensorInfo.GetDataType())
2405 {
2406 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002407 return CreateConstTensorAndStoreData<float>(bufferPtr,
2408 tensorPtr,
2409 tensorInfo,
2410 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002411 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002412 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2413 tensorPtr,
2414 tensorInfo,
2415 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002416 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002417 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2418 tensorPtr,
2419 tensorInfo,
2420 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002421 default:
2422 {
2423 std::stringstream errString;
2424 errString << "Unexpected datatype when creating const tensor: "
2425 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2426 << " shape:" << tensorInfo.GetShape()
2427 << CHECK_LOCATION().AsString();
2428 throw ParseException(errString.str());
2429 }
2430 }
2431}
2432
2433BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2434 const std::string& name) const
2435{
2436 CHECK_SUBGRAPH(m_Model, subgraphId);
2437 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2438 for (auto const & input : inputs)
2439 {
2440 if (input.second->name == name)
2441 {
2442 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2443 return std::make_pair(bindingId, ToTensorInfo(input.second));
2444 }
2445 }
2446
2447 std::stringstream bindings;
2448 for (auto const & input : inputs)
2449 {
2450 bindings << "'" << input.second->name << "' ";
2451 }
2452
2453 throw ParseException(
2454 boost::str(
2455 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2456 "Possible inputs are: [%3%] %4%") %
2457 subgraphId %
2458 name %
2459 bindings.str() %
2460 CHECK_LOCATION().AsString()));
2461}
2462
2463BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2464 const std::string& name) const
2465{
2466 CHECK_SUBGRAPH(m_Model, subgraphId);
2467 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002468 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002469 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002470 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002471 if (output.second->name == name)
2472 {
2473 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002474 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2475 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2476 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002477 }
2478 }
2479
2480 std::stringstream bindings;
2481 for (auto const & output : outputs)
2482 {
2483 bindings << "'" << output.second->name << "' ";
2484 }
2485
2486 throw ParseException(
2487 boost::str(
2488 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2489 "Possible outputs are: [%3%] %4%") %
2490 subgraphId %
2491 name %
2492 bindings.str() %
2493 CHECK_LOCATION().AsString()));
2494}
2495
2496size_t TfLiteParser::GetSubgraphCount() const
2497{
2498 return m_Model->subgraphs.size();
2499}
2500
2501std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2502{
2503 CHECK_SUBGRAPH(m_Model, subgraphId);
2504 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2505 std::vector<std::string> result;
2506 result.reserve(inputs.size());
2507 for (auto const & input : inputs)
2508 {
2509 result.push_back(input.second->name);
2510 }
2511 return result;
2512}
2513
2514std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2515{
2516 CHECK_SUBGRAPH(m_Model, subgraphId);
2517 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2518 std::vector<std::string> result;
2519 result.reserve(outputs.size());
2520 for (auto const & output : outputs)
2521 {
2522 result.push_back(output.second->name);
2523 }
2524 return result;
2525}
2526
2527ITfLiteParser* ITfLiteParser::CreateRaw()
2528{
2529 return new TfLiteParser();
2530}
2531
2532ITfLiteParserPtr ITfLiteParser::Create()
2533{
2534 return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
2535}
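// A minimal end-to-end usage sketch. CreateNetworkFromBinaryFile is declared on the ITfLiteParser
// interface elsewhere; the file name and tensor names are illustrative:
//     ITfLiteParserPtr parser = ITfLiteParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//     BindingPointInfo inputBinding  = parser->GetNetworkInputBindingInfo(0, "input");
//     BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(0, "output");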
2536
2537void ITfLiteParser::Destroy(ITfLiteParser* parser)
2538{
2539 delete parser;
2540}
2541
2542TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
2543: m_FloatData(std::move(data))
2544, m_Uint8Data(nullptr)
2545, m_Int32Data(nullptr)
2546{
2547}
2548
2549TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2550: m_FloatData(nullptr)
2551, m_Uint8Data(std::move(data))
2552, m_Int32Data(nullptr)
2553{
2554}
2555
2556TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2557: m_FloatData(nullptr)
2558, m_Uint8Data(nullptr)
2559, m_Int32Data(std::move(data))
2560{
2561}
2562
2563} // armnnTfLiteParser