blob: fdb38122c21c3aca9fa43e5b17a9792457bd10d2 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5#include "TfLiteParser.hpp"
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Exceptions.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <boost/filesystem.hpp>
11
12// armnnUtils:
Sadik Armagan479045b2018-10-01 11:51:37 +010013#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010014#include <Permute.hpp>
15#include <VerificationHelpers.hpp>
16
17// The generated code based on the Tf Lite schema:
18#include <schema_generated.h>
19
20#include <boost/core/ignore_unused.hpp>
21#include <boost/assert.hpp>
22#include <boost/format.hpp>
23#include <boost/log/trivial.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010024#include <boost/format.hpp>
25#include <boost/numeric/conversion/cast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026
27#include <fstream>
28#include <algorithm>
29#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010030#include <numeric>
keidav011b3e2ea2019-02-21 10:07:37 +000031#include <flatbuffers/flexbuffers.h>
telsoa01c577f2c2018-08-31 09:22:23 +010032
33using namespace armnn;
34using armnn::CheckLocation;
35namespace armnnTfLiteParser
36{
37namespace
38{
jimfly01c25411c2018-11-14 17:47:22 +000039
telsoa01c577f2c2018-08-31 09:22:23 +010040const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
41
42void CheckSubgraph(const TfLiteParser::ModelPtr & model,
43 size_t subgraphIndex,
44 const CheckLocation & location)
45{
46 if (model.get() == nullptr)
47 {
48 throw ParseException(
49 boost::str(
50 boost::format("%1% was called with invalid (null) model. "
51 "Possible reason is that the model is not yet loaded and Unpack(ed). "
52 "subgraph:%2% at %3%") %
53 location.m_Function %
54 subgraphIndex %
55 location.FileLine()));
56 }
57 else if (subgraphIndex >= model->subgraphs.size())
58 {
59 throw ParseException(
60 boost::str(
61 boost::format("%1% was called with an invalid subgraph index. "
62 "subgraph:%2% at %3%") %
63 location.m_Function %
64 subgraphIndex %
65 location.FileLine()));
66 }
67}
68
69#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
70 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
71
72void CheckModel(const TfLiteParser::ModelPtr & model,
73 size_t subgraphIndex,
74 size_t operatorIndex,
75 const CheckLocation & location)
76{
77 if (model.get() == nullptr)
78 {
79 throw ParseException(
80 boost::str(
81 boost::format("%1% was called with invalid (null) model. "
82 "Possible reason is that the model is not yet loaded and Unpack(ed). "
83 "subgraph:%2% operator:%3% at %4%") %
84 location.m_Function %
85 subgraphIndex %
86 operatorIndex %
87 location.FileLine()));
88 }
89 else if (subgraphIndex >= model->subgraphs.size())
90 {
91 throw ParseException(
92 boost::str(
93 boost::format("%1% was called with an invalid subgraph index. "
94 "subgraph:%2% operator:%3% at %4%") %
95 location.m_Function %
96 subgraphIndex %
97 operatorIndex %
98 location.FileLine()));
99 }
100 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
101 operatorIndex != VIRTUAL_OPERATOR_ID)
102 {
103 throw ParseException(
104 boost::str(
105 boost::format("%1% was called with an invalid operator index. "
106 "subgraph:%2% operator:%3% at %4%") %
107 location.m_Function %
108 subgraphIndex %
109 operatorIndex %
110 location.FileLine()));
111 }
112}
113
114#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
115 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
116
117void CheckTensor(const TfLiteParser::ModelPtr & model,
118 size_t subgraphIndex,
119 size_t tensorIndex,
120 const CheckLocation & location)
121{
122 // not checking model, because I assume CHECK_MODEL already run
123 // and checked that. An assert would do.
124 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
125
126 // also subgraph index should be checked by CHECK_MODEL so
127 // I only add an assert here
128 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
129
130 // the tensor index is the only one to check here
131 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
132 {
133 throw ParseException(
134 boost::str(
135 boost::format("%1% was called with an invalid tensor index. "
136 "subgraph:%2% tensor:%3% at %4%") %
137 location.m_Function %
138 subgraphIndex %
139 tensorIndex %
140 location.FileLine()));
141 }
142}
143
144#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
145 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
146
147void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
148 const CheckLocation & location)
149{
150 if (rawPtr == nullptr)
151 {
152 throw ParseException(
153 boost::str(
154 boost::format("%1% was called with a null tensor pointer. "
155 "at %2%") %
156 location.m_Function %
157 location.FileLine()));
158
159 }
160}
161
162#define CHECK_TENSOR_PTR(TENSOR_PTR) \
163 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
164
165void CheckBuffer(const TfLiteParser::ModelPtr & model,
166 size_t bufferIndex,
167 const CheckLocation & location)
168{
169 if (model.get() == nullptr)
170 {
171 throw ParseException(
172 boost::str(
173 boost::format("%1% was called with invalid (null) model. "
174 "Possible reason is that the model is not yet loaded and Unpack(ed). "
175 "buffer:%2% at %3%") %
176 location.m_Function %
177 bufferIndex %
178 location.FileLine()));
179 }
180 else if (bufferIndex >= model->buffers.size())
181 {
182 throw ParseException(
183 boost::str(
184 boost::format("%1% was called with an invalid buffer index. "
185 "buffer index:%2% at %3%") %
186 location.m_Function %
187 bufferIndex %
188 location.FileLine()));
189 }
190 else if (model->buffers[bufferIndex].get() == nullptr)
191 {
192 throw ParseException(
193 boost::str(
194 boost::format("The buffer #%1% is null. %3%") %
195 bufferIndex %
196 location.AsString()));
197 }
198}
199
200#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
201 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
202
203void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
204 const armnn::TensorInfo & tensorInfo,
205 uint32_t bufferId,
206 const CheckLocation & location)
207{
208 if (bufferPtr == nullptr)
209 {
210 throw ParseException(
211 boost::str(
212 boost::format("BufferPtr is null for buffer:%1%. %2%") %
213 bufferId %
214 location.AsString()));
215 }
216 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
217 tensorInfo.GetNumBytes() > bufferPtr->data.size())
218 {
219 std::stringstream ss;
220 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
221 << "For tensor: " << tensorInfo.GetShape()
222 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
223 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
224 throw ParseException(ss.str());
225 }
226}
227
228#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
229 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
230
231bool IsActivationSupported(tflite::ActivationFunctionType activationType)
232{
233 switch(activationType)
234 {
235 case tflite::ActivationFunctionType_NONE:
236 case tflite::ActivationFunctionType_RELU:
237 case tflite::ActivationFunctionType_RELU6:
238 case tflite::ActivationFunctionType_TANH:
239 {
240 return true;
241 }
242 default:
243 {
244 return false;
245 }
246 }
247}
248
249#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
250 do { \
251 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
252 { \
253 throw ParseException( \
254 boost::str( \
255 boost::format("TfLite parser doesn't suppport fused activation: " \
256 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
257 OPTION->fused_activation_function % \
258 tflite::EnumNameActivationFunctionType(\
259 OPTION->fused_activation_function) % \
260 __func__ % \
261 SUBGRAPH_INDEX % \
262 OPERATOR_INDEX % \
263 CHECK_LOCATION().FileLine())); \
264 } \
265 } while(false)
266
267
268std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
269{
270 std::vector<unsigned int> result;
271 result.reserve(in.size());
272 for (auto & i : in)
273 {
274 result.push_back(CHECKED_NON_NEGATIVE(i));
275 }
276 return result;
277}
278
279void CalcPadding(uint32_t inputSize,
280 uint32_t filterSize,
281 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100282 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100283 uint32_t& paddingFront,
284 uint32_t& paddingBack,
285 tflite::Padding padding)
286{
287 paddingFront = 0;
288 paddingBack = 0;
289 if (padding == tflite::Padding_SAME)
290 {
291 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100292 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
293 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100294 if (temp > inputSize)
295 {
296 paddingFront = (temp - inputSize) / 2;
297 paddingBack = (temp - inputSize) - paddingFront;
298 }
299 }
300}
301
// Builds an armnn::TensorInfo from a TfLite tensor using an explicitly
// supplied shape. Maps the TfLite element type to the ArmNN DataType,
// extracts per-tensor quantization parameters if present, and substitutes
// shape {1} for an empty shape (scalar tensors).
// Throws ParseException for unsupported element types.
armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    // Only these three TfLite element types are currently supported.
    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                    tensorPtr->type %
                    tflite::EnumNameTensorType(tensorPtr->type) %
                    tensorPtr->name %
                    location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        // Only per-tensor quantization (0 or 1 scale / zero-point) is accepted.
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32
            // but this is what we support at the monent in ArmNN
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    // An empty shape denotes a scalar; represent it as a 1-element 1D tensor.
    std::vector<unsigned int> safeShape = shapes;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
    }

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(safeShape.size()),
                             safeShape.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}
366
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000367armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
368{
369 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
370 return ToTensorInfo(tensorPtr, dimensions);
371}
372
// Copies the raw flatbuffer data for a constant tensor into freshly
// allocated storage, optionally permuting both the TensorInfo (updated
// in place) and the data layout. Returns the ConstTensor together with
// the owning buffer; the caller must keep the unique_ptr alive for as
// long as the ConstTensor is in use.
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    // Destination storage for the (possibly permuted) constant data.
    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        // Permute the shape first, then rearrange the elements to match it.
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        // No permutation requested: straight byte copy.
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
400
telsoa01c577f2c2018-08-31 09:22:23 +0100401armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
402{
403 // generate the binding id by shifting the tensor id by 8 bit
404 // and add the subgraph id, which allows 256 subgraphs
405 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
406}
407
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000408bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
409{
410 const unsigned int actualSize = actual.GetNumDimensions();
411 if (actualSize != expected.size())
412 {
413 return false;
414 }
415
416 for (unsigned int i = 0u; i < actualSize; i++)
417 {
418 if (expected[i] < 0 ||
419 actual[i] != static_cast<unsigned int>(expected[i]))
420 {
421 return false;
422 }
423 }
424
425 return true;
426}
427
telsoa01c577f2c2018-08-31 09:22:23 +0100428} // <anonymous>
429
// Constructor: sizes the dispatch table to cover every builtin opcode and
// defaults all entries to ParseUnsupportedOperator (which throws), then
// overwrites the entries for the operators this parser implements.
TfLiteParser::TfLiteParser()
: m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]   = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]     = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]           = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    // CUSTOM currently only covers the TFLite_Detection_PostProcess custom op.
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]            = &TfLiteParser::ParseDetectionPostProcess;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]   = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]          = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]       = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]           = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]           = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]              = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]             = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]           = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]   = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]     = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]               = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_ADD]               = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]               = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]              = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]               = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]             = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]              = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]            = &TfLiteParser::ParseUnpack;
}
463
464void TfLiteParser::ResetParser()
465{
466 m_Network = armnn::INetworkPtr(nullptr, nullptr);
467 m_Model = nullptr;
468 m_SubgraphConnections.clear();
469}
470
// Inserts a Reshape layer in front of a binary elementwise layer so that
// its two inputs have the same rank (TfLite-style broadcasting). The
// lower-rank input is reshaped by left-padding its shape with 1s; the
// reshape output feeds the layer's input slot 0 and the other tensor is
// registered as the consumer of input slot 1.
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    // Binary op: there must be at least two inputs.
    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    // Start by assuming input 0 is the one to reshape.
    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    // If input 1 is actually the lower-rank tensor, swap the roles so that
    // 'reshaped*' always refers to the tensor being expanded.
    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    // Left-pad the smaller shape with 1s up to the larger rank, e.g.
    // [H, W] broadcast against rank 4 becomes [1, 1, H, W].
    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    // The reshape consumes the original (lower-rank) tensor...
    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    // ...and the unmodified tensor connects straight into slot 1.
    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
527
// Loads a TfLite flatbuffer from disk and builds the ArmNN network from it.
// Resets any state left over from a previous parse first.
INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}
534
// Builds the ArmNN network from an in-memory TfLite flatbuffer.
// Resets any state left over from a previous parse first.
INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
541
// Translates the unpacked TfLite model (m_Model) into an ArmNN INetwork.
// Dispatches each operator to its registered parse function, collecting
// (rather than immediately rethrowing) per-operator ParseExceptions so
// all failures are reported together, then wires up the recorded
// output-slot -> input-slot connections. Currently only single-subgraph
// models are accepted.
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubgraphPtr const & subgraph : m_Model->subgraphs)
    {
        // One connection-bookkeeping slot per tensor in this subgraph.
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                            builtinCode %
                            tflite::BuiltinOperator_MAX %
                            subgraphIndex %
                            operatorIndex %
                            CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                // Keep parsing the remaining operators; report everything at the end.
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // A tensor with no producer (e.g. unused) is simply skipped.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                    inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                    ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}
637
638void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
639 size_t tensorIndex,
640 armnn::IOutputSlot* slot)
641{
642 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
643 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
644 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
645
646 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
647
648 // assuming there is only one producer for that tensor
649 if (tensorSlots.outputSlot != nullptr)
650 {
651 throw ParseException(boost::str(
652 boost::format("Another layer has already registered itself as the producer of "
653 "subgraph:%1% tensor:%2% %3%") %
654 subgraphIndex %
655 tensorIndex %
656 CHECK_LOCATION().AsString()));
657 }
658
659 tensorSlots.outputSlot = slot;
660}
661
662void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
663 size_t tensorIndex,
664 armnn::IInputSlot* slot)
665{
666 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
667 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
668 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
669
670 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
671 tensorSlots.inputSlots.push_back(slot);
672}
673
674void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
675{
676 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
677 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
678 //
679 auto opcodeIndex = operatorPtr->opcode_index;
680 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
681
682 throw ParseException(
683 boost::str(
684 boost::format("Operator not supported. "
685 "subgraph:%1% operator:%2% "
686 "opcode_index:%3% opcode:%4% / %5% %6%") %
687 subgraphIndex %
688 operatorIndex %
689 opcodeIndex %
690 opcode %
691 tflite::EnumNameBuiltinOperator(opcode) %
692 CHECK_LOCATION().AsString()));
693}
694
// Parses a TfLite CONV_2D operator into an ArmNN Convolution2d layer
// (NHWC, OHWI weights), including optional bias, SAME/VALID padding
// computation with dilation, and an optional fused activation layer
// appended after the convolution.
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    // Reject fused activations the parser cannot map onto ArmNN layers.
    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // 2 inputs = data + weights; 3 inputs adds the bias.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Compute per-dimension padding from the TfLite SAME/VALID scheme.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // No permutation needed: the OHWI layout matches what ArmNN expects here.
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 biasTensorAndData.first,
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // If an activation is fused, the registered output comes from the
    // appended activation layer, not the convolution itself.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
776
// Parses a TfLite DEPTHWISE_CONV_2D operator: builds the descriptor, re-lays-out the
// constant weight (and optional bias) tensors, adds the ArmNN layer and registers its
// input/output slots. Throws ParseException via the CHECK_* macros on malformed models.
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    // Bias is optional: enabled below only when a third input tensor is present.
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // ACL only supports a depth (channel) multiplier of 1, it is not currently stored in the descriptor
    CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1);

    // 2 inputs = data + weights, 3 inputs = data + weights + bias.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    // M (the depth multiplier) is recovered as (I * M) / I from the two shapes.
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    // Padding is derived from input/filter geometry and the TfLite padding scheme (SAME/VALID).
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // The weights are permuted while being copied into the ArmNN const tensor.
    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        // Bias needs no permutation, hence the empty Optional.
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasTensorAndData.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // The fused activation (if any) becomes a separate layer; outputs are registered on it.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
863
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100864void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
865{
866 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
867}
868
Bruno Goncalvesdb947e22019-02-08 18:52:21 -0200869void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
870{
871 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
872
873 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
874 CHECK_VALID_SIZE(inputs.size(), 3);
875
876 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
877 CHECK_VALID_SIZE(outputs.size(), 1);
878
879 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
880 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
881
882 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
883 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
884
885 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
886 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
887
888 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
889 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
890
891 size_t step = 2;
892 std::vector<std::pair<unsigned int, unsigned int>> crops;
893 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
894 {
895 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
896 }
897
898 armnn::BatchToSpaceNdDescriptor desc;
899 desc.m_BlockShape = blockShape;
900 desc.m_Crops = crops;
901 desc.m_DataLayout = armnn::DataLayout::NHWC;
902
903 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
904
905 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
906 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
907
908 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
909
910 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
911 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
912
913 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
914 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
915}
916
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100917void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
918{
919 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
920}
921
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -0200922void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
923{
924 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
925
926 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
927 CHECK_VALID_SIZE(inputs.size(), 2);
928
929 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
930 CHECK_VALID_SIZE(outputs.size(), 1);
931
932 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
933 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
934
935 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
936 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
937
938 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
939 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
940
941 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
942 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
943 {
944 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
945 }
946 else
947 {
948 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
949 }
950
951 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
952 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
953}
954
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -0200955void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
956{
957 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
958
959 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
960 CHECK_VALID_SIZE(inputs.size(), 2);
961
962 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
963 CHECK_VALID_SIZE(outputs.size(), 1);
964
965 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
966 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
967
968 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
969 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
970
971 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
972 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
973
974 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
975 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
976 {
977 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
978 }
979 else
980 {
981 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
982 }
983
984 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
985 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
986}
987
// Shared handler for the TfLite pooling operators (AVERAGE_POOL_2D / MAX_POOL_2D).
// Builds a Pooling2dDescriptor from the operator options, adds the layer, and wires
// its slots. The caller selects the algorithm; fused activations become extra layers.
void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    // The layer name encodes the concrete TfLite operator so diagnostics stay traceable.
    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    // Padding values are excluded from the average; output size rounds down.
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // Pooling has no dilation, hence the fixed 1u dilation factor.
    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // The fused activation (if any) becomes a separate layer; outputs are registered on it.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1059
telsoa01c577f2c2018-08-31 09:22:23 +01001060void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1061{
1062 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1063 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1064 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1065
1066 SoftmaxDescriptor desc;
1067 desc.m_Beta = options->beta;
1068
1069 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1070 CHECK_VALID_SIZE(inputs.size(), 1);
1071 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1072 CHECK_VALID_SIZE(outputs.size(), 1);
1073
1074 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1075 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1076
1077 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1078 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1079
1080 // register the input connection slots for the layer, connections are made after all layers have been created
1081 // only the tensors for the inputs are relevant, exclude the const tensors
1082 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1083 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1084
1085 // register the output connection slots for the layer, connections are made after all layers have been created
1086 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1087 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1088}
1089
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001090void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1091{
1092 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1093
1094 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1095 CHECK_VALID_SIZE(inputs.size(), 3);
1096
1097 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1098 CHECK_VALID_SIZE(outputs.size(), 1);
1099
1100 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1101 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1102
1103 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1104 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1105
1106 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1107 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1108
1109 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1110 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1111
1112 size_t step = 2;
1113 std::vector<std::pair<unsigned int, unsigned int>> padList;
1114 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1115 {
1116 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1117 }
1118
1119 armnn::SpaceToBatchNdDescriptor desc;
1120 desc.m_BlockShape = blockShape;
1121 desc.m_PadList = padList;
1122 desc.m_DataLayout = armnn::DataLayout::NHWC;
1123
1124 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1125
1126 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1127 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1128
1129 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1130
1131 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1132 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1133
1134 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1135 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1136}
1137
telsoa01c577f2c2018-08-31 09:22:23 +01001138armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1139 const armnn::TensorInfo & inputTensorInfo)
1140{
1141 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1142 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1143 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1144
1145 if (inputTensorInfo.GetNumDimensions() > 4)
1146 {
1147 std::stringstream ss;
1148 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1149 << " shape:" << inputTensorInfo.GetShape() << " "
1150 << CHECK_LOCATION().AsString();
1151 throw ParseException(ss.str());
1152 }
1153
1154 if (squeezeDims.empty())
1155 {
1156 squeezeDims.assign(dimensionSequence,
1157 dimensionSequence+inputTensorInfo.GetNumDimensions());
1158 }
1159
1160 std::vector<uint32_t> outputDims;
1161 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1162 {
1163 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1164 auto currentDimension = inputTensorInfo.GetShape()[i];
1165 if (skipSqueeze || currentDimension != 1)
1166 {
1167 outputDims.push_back(currentDimension);
1168 }
1169 }
1170
1171 if (outputDims.size() > 4)
1172 {
1173 std::stringstream ss;
1174 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1175 << " shape:" << inputTensorInfo.GetShape() << " "
1176 << CHECK_LOCATION().AsString();
1177 throw ParseException(ss.str());
1178 }
1179
1180 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1181 outputDims.data());
1182
1183 // we need to preserve the tensor type and the quantization data as well
1184 TensorInfo outTensorInfo = inputTensorInfo;
1185 outTensorInfo.SetShape(outShape);
1186
1187 return outTensorInfo;
1188}
1189
1190void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1191{
1192 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1193
1194 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1195 CHECK_VALID_SIZE(inputs.size(), 1);
1196
1197 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1198 CHECK_VALID_SIZE(outputs.size(), 1);
1199
1200 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1201 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1202
1203 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1204 armnn::TensorInfo outputTensorInfo =
1205 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1206 inputTensorInfo);
1207
1208 ReshapeDescriptor reshapeDesc;
1209 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1210
1211 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1212 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1213 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1214
1215 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1216 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1217
1218 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1219 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1220}
1221
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001222void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1223{
1224 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1225
1226 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1227 CHECK_VALID_SIZE(inputs.size(), 4);
1228
1229 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1230 CHECK_VALID_SIZE(outputs.size(), 1);
1231
1232 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1233 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1234
1235 StridedSliceDescriptor desc;
1236 desc.m_BeginMask = options->begin_mask;
1237 desc.m_EllipsisMask = options->ellipsis_mask;
1238 desc.m_EndMask = options->end_mask;
1239 desc.m_NewAxisMask = options->new_axis_mask;
1240 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1241 desc.m_DataLayout = armnn::DataLayout::NHWC;
1242
1243 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1244 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1245
1246 std::vector<int> begin(beginTensorInfo.GetNumElements());
1247 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1248
1249 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1250 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1251
1252 std::vector<int> end(endTensorInfo.GetNumElements());
1253 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1254
1255 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1256 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1257
1258 std::vector<int> stride(strideTensorInfo.GetNumElements());
1259 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1260
1261 desc.m_Begin = begin;
1262 desc.m_End = end;
1263 desc.m_Stride = stride;
1264
1265 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1266 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1267
1268 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1269 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1270
1271 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1272 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1273
1274 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1275 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1276}
1277
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001278void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1279{
1280 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1281
1282 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1283 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1284
1285 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1286 CHECK_VALID_SIZE(inputs.size(), 2);
1287
1288 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1289 CHECK_VALID_SIZE(outputs.size(), 1);
1290
1291 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1292 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1293
1294 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1295 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1296
1297 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1298 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1299
1300 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1301 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1302 {
1303 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1304 }
1305 else
1306 {
1307 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1308 }
1309
1310 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1311
1312 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1313 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1314}
1315
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001316void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1317{
1318 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1319
1320 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1321 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1322
1323 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1324 CHECK_VALID_SIZE(inputs.size(), 2);
1325
1326 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1327 CHECK_VALID_SIZE(outputs.size(), 1);
1328
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001329 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1330 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1331
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001332 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1333 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1334
1335 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1336 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1337
1338 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001339 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1340 {
1341 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1342 }
1343 else
1344 {
1345 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1346 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001347
1348 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1349
1350 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1351 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1352}
1353
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001354void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1355{
1356 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1357
1358 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1359 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1360
1361 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1362 CHECK_VALID_SIZE(inputs.size(), 2);
1363
1364 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1365 CHECK_VALID_SIZE(outputs.size(), 1);
1366
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001367 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1368 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1369
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001370 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1371 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1372
1373 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1374 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1375
1376 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001377 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1378 {
1379 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1380 }
1381 else
1382 {
1383 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1384 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001385
1386 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1387
1388 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1389 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1390}
1391
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001392void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1393{
1394 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1395
1396 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1397
1398 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1399 CHECK_VALID_SIZE(outputs.size(), 1);
1400
1401 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1402 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1403
1404 armnn::MeanDescriptor desc;
1405 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1406 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1407 desc.m_Axis = axis;
1408
1409 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1410 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1411
1412 desc.m_KeepDims =
1413 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1414 true : false;
1415
1416 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1417 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1418
1419 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1420
1421 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1422 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1423
1424 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1425 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1426}
1427
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001428void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1429{
1430 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1431
1432 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1433
1434 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1435 CHECK_VALID_SIZE(outputs.size(), 1);
1436
1437 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1438 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1439
1440 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1441 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1442
1443 size_t step = 2;
1444 armnn::PadDescriptor desc;
1445 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1446 {
1447 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1448 }
1449
1450 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1451 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1452
1453 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1454 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1455
1456 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1457 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1458
1459 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1460 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1461}
1462
Finn Williamsc42c3842019-01-22 14:18:11 +00001463
Sadik Armagan58f39192018-09-17 14:14:39 +01001464void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1465{
Finn Williamsc42c3842019-01-22 14:18:11 +00001466 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001467}
1468
1469void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1470{
Finn Williamsc42c3842019-01-22 14:18:11 +00001471 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1472}
Sadik Armagan58f39192018-09-17 14:14:39 +01001473
Finn Williamsc42c3842019-01-22 14:18:11 +00001474void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1475{
1476 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1477}
1478
Nina Drozd99851762019-04-09 09:37:38 +01001479void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1480{
1481 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1482}
1483
Finn Williamsc42c3842019-01-22 14:18:11 +00001484
1485void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1486{
1487 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001488 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1489 boost::ignore_unused(operatorPtr);
1490
1491 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1492 CHECK_VALID_SIZE(inputs.size(), 1);
1493
1494 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1495 CHECK_VALID_SIZE(outputs.size(), 1);
1496
Finn Williamsc42c3842019-01-22 14:18:11 +00001497 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001498 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001499 activationDesc.m_Function = activationType;
1500
1501 switch (activationType)
1502 {
1503 case ActivationFunction::ReLu:
1504 {
1505 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1506 break;
1507 }
1508 case ActivationFunction::BoundedReLu:
1509 {
1510 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1511 activationDesc.m_A = 6.0f;
1512 activationDesc.m_B = 0.0f;
1513 break;
1514 }
1515 case ActivationFunction::Sigmoid:
1516 {
1517 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1518 break;
1519 }
Nina Drozd99851762019-04-09 09:37:38 +01001520 case ActivationFunction::TanH:
1521 {
1522 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1523 activationDesc.m_A = 1.0f;
1524 activationDesc.m_B = 1.0f;
1525 break;
1526 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001527 default:
1528 {
1529 throw ParseException(
1530 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1531 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1532 }
1533 }
1534
1535 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001536
1537 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1538 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1539
1540 // register the input connection slots for the layer, connections are made after all layers have been created
1541 // only the tensors for the inputs are relevant, exclude the const tensors
1542 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1543 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1544
1545 // register the output connection slots for the layer, connections are made after all layers have been created
1546 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1547 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1548}
Sadikb94967b2018-09-19 15:30:00 +01001549armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1550 const std::vector<int32_t> & targetDimsIn)
1551{
1552 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1553 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1554
1555 if (stretchDim != targetDimsIn.end())
1556 {
1557 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1558 {
1559 throw ParseException(
1560 boost::str(
1561 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1562 }
1563
1564 auto targetNumElements =
1565 boost::numeric_cast<unsigned int>(
1566 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1567
1568 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1569 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1570 }
1571
1572 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1573
1574 TensorInfo reshapeInfo = inputTensorInfo;
1575 reshapeInfo.SetShape(outputShape);
1576
1577 return reshapeInfo;
1578}
1579
1580void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1581{
1582 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1583
1584 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001585
1586 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1587 CHECK_VALID_SIZE(outputs.size(), 1);
1588
1589 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1590 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1591
1592 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001593 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1594 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001595 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1596
kevmay0171972a82018-12-17 14:28:03 +00001597 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001598 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1599 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001600 {
1601 std::stringstream ss;
1602 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001603 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001604 << " does not equal output shape "
1605 << actualOutputTensorInfo.GetShape()
1606 << ": "
1607 << CHECK_LOCATION().AsString();
1608 throw ParseException(ss.str());
1609 }
1610
Sadikb94967b2018-09-19 15:30:00 +01001611 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001612 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001613
1614 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1615 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001616 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001617
1618 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1619 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1620
1621 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1622 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1623}
1624
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001625void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1626{
1627 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1628
1629 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1630 CHECK_VALID_SIZE(inputs.size(), 2);
1631
1632 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1633 CHECK_VALID_SIZE(outputs.size(), 1);
1634
1635 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1636
1637 // Data for the parsed tensor args (size) must be stored locally.
1638 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1639
1640 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1641 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1642
1643 ResizeBilinearDescriptor desc;
1644 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
1645 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1646 desc.m_DataLayout = armnn::DataLayout::NHWC;
1647
1648 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
1649 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
1650
1651 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1652 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1653
1654 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1655 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1656
1657 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1658 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1659}
1660
// Converts a TfLite CONCATENATION operator into an armnn Merger layer,
// followed by an optional fused-activation layer.
void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
    uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();

    // Normalise a possibly-negative TfLite axis into [0, inputRank).
    const unsigned int concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));

    // One view per input tensor, all of the same rank as the first input.
    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

    // Running offset along the concatenation axis, advanced by each view.
    unsigned int mergeDimOrigin = 0;

    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);

        // This set up concatDescriptor view origin
        armnnUtils::ProcessConcatInputTensorInfo(
            inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // All input tensors feed the merger layer (one slot per concatenated view).
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});

    // add fused activation layer
    // NOTE: from here on 'layer' is the activation layer (or the merger if no
    // activation), so the operator's output registers against the right layer.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1712
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001713void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1714{
1715 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1716
1717 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1718 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1719
1720 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1721
1722 FullyConnectedDescriptor desc;
1723 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001724 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001725
1726 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1727 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1728 CHECK_VALID_SIZE(outputs.size(), 1);
1729
1730 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1731
1732 // Fully Connected Layer accepts two dimensional weights input
1733 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1734 if (weightsDimension != 2)
1735 {
1736 throw ParseException(
1737 boost::str(
1738 boost::format(
1739 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1740 "Node %2%")
1741 % weightsDimension
1742 % CHECK_LOCATION().AsString()));
1743 }
1744
Matteo Martincigh747ef822018-12-18 09:26:39 +00001745 auto filterTensorAndData = CreateConstTensor(inputs[1],
1746 filterTensorInfo,
1747 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001748 armnn::IConnectableLayer* layer;
1749 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1750
1751 if (inputs.size() == 3)
1752 {
1753 desc.m_BiasEnabled = true;
1754 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001755 auto biasTensorAndData = CreateConstTensor(inputs[2],
1756 biasTensorInfo,
1757 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001758 layer = m_Network->AddFullyConnectedLayer(desc,
1759 filterTensorAndData.first,
1760 biasTensorAndData.first,
1761 layerName.c_str());
1762 }
1763 else
1764 {
1765 layer = m_Network->AddFullyConnectedLayer(desc,
1766 filterTensorAndData.first,
1767 layerName.c_str());
1768 }
1769 BOOST_ASSERT(layer != nullptr);
1770
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001771 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1772
1773 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1774
1775 if (inputTensorInfo.GetNumDimensions() > 2)
1776 {
1777 // Add reshape to flatten to 2D [batch_size, input_size],
1778 // where "input_size" corresponds to the number of inputs to the layer,
1779 // matching the second dimension of weights,
1780 // and "batch_size" is calculated by dividing the number of elements by "input_size".
1781 std::vector<unsigned int> reshapedDimensions(2);
1782 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
1783 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
1784
1785 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
1786 {
1787 throw ParseException(
1788 boost::str(
1789 boost::format(
1790 "Failed to deduce input tensor shape from filter size %1%")
1791 % reshapedDimensions[1]
1792 % CHECK_LOCATION().AsString()));
1793 }
1794
1795 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
1796 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
1797
1798 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
1799 armnn::ReshapeDescriptor desc;
1800 desc.m_TargetShape = reshapedTensorInfo.GetShape();
1801 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
1802
1803 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
1804 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
1805
1806 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
1807 }
1808 else
1809 {
1810 // register the input connection slot for the layer
1811 // only the tensors for the inputs are relevant, exclude the const tensors
1812 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1813 }
1814
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001815 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1816 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1817
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001818 // we need to add the activation layer and fortunately we don't need to care about the data layout
1819 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1820 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001821
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001822 // register the output connection slots for the layer, connections are made after all layers have been created
1823 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1824 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1825}
1826
// Converts the custom TFLite_Detection_PostProcess operator into an armnn
// DetectionPostProcess layer. The operator's parameters arrive as a
// flexbuffer map in custom_options rather than as builtin options.
// Outputs (4): detection boxes, classes, scores, and the number of detections.
void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // Optional keys: leave the descriptor defaults in place when absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Input 2 holds the constant anchor boxes.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
                                                                       layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1 });

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    // (input 2, the anchors, is consumed above as a constant).
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
1904
Nina Drozd200e3802019-04-15 09:47:39 +01001905void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
1906{
1907 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1908
1909 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1910 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
1911
1912 // This unpackAxis indicates the axis to unpack
1913 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
1914
1915 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1916 CHECK_VALID_SIZE(inputs.size(), 1);
1917
1918 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001919
1920 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
1921 {
1922 throw ParseException(
1923 boost::str(
1924 boost::format(
1925 "The unpack axis: %1% cannot be greater than or equal to "
1926 "the number of input dimension %2% %3%")
1927 % unpackAxis
1928 % inputTensorInfo.GetNumDimensions()
1929 % CHECK_LOCATION().AsString()));
1930 }
1931
Nina Drozd200e3802019-04-15 09:47:39 +01001932 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
1933 // If num is not defined, automatically infer from the length of the dimension axis.
1934 if(unpackNum == 0)
1935 {
1936 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
1937 }
1938
1939 // If unpack number cannot be inferred and is still zero, throw ParseException.
1940 if(unpackNum == 0)
1941 {
1942 throw ParseException("Number to unpack must greater than zero.");
1943 }
1944
1945 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1946 CHECK_VALID_SIZE(outputs.size(), unpackNum);
1947
1948 auto inputDimSize = inputTensorInfo.GetNumDimensions();
1949 std::vector<unsigned int> unpackDimSizes(inputDimSize);
1950
1951 // Add current input shape to unpackDimSizes
1952 for (unsigned int i = 0; i < inputDimSize; ++i)
1953 {
1954 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
1955 }
1956
1957 if (unpackDimSizes[unpackAxis] != unpackNum)
1958 {
1959 throw ParseException("Number to unpack must be the same as length of the dimension to "
1960 "unpack along.");
1961 }
1962
1963 unpackDimSizes[unpackAxis] /= unpackNum;
1964
1965 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
1966 for (unsigned int j = 0; j < unpackNum; ++j)
1967 {
1968 // Set the size of the views.
1969 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
1970 {
1971 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
1972 }
1973 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
1974 }
1975
1976 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
1977 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
1978
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001979 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
1980 unpackDimSizes.data());
1981
Nina Drozd200e3802019-04-15 09:47:39 +01001982 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1983 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1984
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001985 // Reshape to remove unpacked dimension
1986 unsigned int reshapedNumDimensions = inputDimSize - 1;
1987 std::vector<unsigned int> reshapedDimensions(reshapedNumDimensions);
Nina Drozd200e3802019-04-15 09:47:39 +01001988
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001989 unsigned int reshapeIndex = 0;
1990 for (unsigned int i = 0; i < inputDimSize; ++i)
Nina Drozd200e3802019-04-15 09:47:39 +01001991 {
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001992 if (i == unpackAxis)
1993 {
1994 continue;
1995 }
1996 reshapedDimensions[reshapeIndex++] = unpackDimSizes[i];
Nina Drozd200e3802019-04-15 09:47:39 +01001997 }
1998
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001999 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2000 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2001 {
2002 armnn::TensorInfo reshapedTensorInfo = inputTensorInfo;
2003 reshapedTensorInfo.SetShape(armnn::TensorShape{ reshapedNumDimensions, reshapedDimensions.data() });
2004
2005 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2006 armnn::ReshapeDescriptor desc;
2007 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2008 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2009
2010 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape, inputTensorInfo.GetDataType()));
2011 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2012
2013 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2014
2015 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2016 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2017 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2018 }
Nina Drozd200e3802019-04-15 09:47:39 +01002019}
2020
// Converts a TfLite SPLIT operator into an armnn Splitter layer.
// Input 0 is a constant scalar axis tensor; input 1 is the tensor to split.
void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSplitOptions();

    const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);

    // If number of splits cannot be inferred and is zero, throw ParseException.
    if(numSplits == 0)
    {
        throw ParseException("Number to splits must greater than zero.");
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), numSplits);

    // NOTE: input ordering is axis first, data second (the reverse of most ops).
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);

    // Read the constant axis value out of the model's buffer.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
    std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());

    BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
    const unsigned int splitDim = axisData[0];

    // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
    if (splitDim == 0 || splitDim == 2)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "Dimension %1% for split is not supported by Armnn. %2%")
                    % splitDim
                    % CHECK_LOCATION().AsString()));
    }

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "The number of dimensions: %1% for input tensors of the "
                    "split op cannot be greater than %2% %3%")
                    % inputTensorInfo.GetNumDimensions()
                    % MaxNumOfTensorDimensions
                    % CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    // Each view covers an equal slice of the split dimension.
    splitterDimSizes[splitDim] /= numSplits;

    SplitterDescriptor splitDesc(numSplits, inputDimSize);
    for (unsigned int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        // View j starts j slice-widths into the split dimension.
        splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

    auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());

    // Only the data tensor (index 1) feeds the layer; the axis tensor is constant.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});

    // All outputs share the same (sliced) shape and the input's data type.
    TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
                                       splitterDimSizes.data());

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
                                                                inputTensorInfo.GetDataType()));
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2118
Sadik Armagan58f39192018-09-17 14:14:39 +01002119armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2120 unsigned int outputSlot,
2121 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002122{
2123 ActivationDescriptor activationDesc;
2124 std::string layerName = prevLayer->GetName();
2125
2126 switch(activationType)
2127 {
2128 case tflite::ActivationFunctionType_NONE:
2129 {
2130 // this is a no-op: return previous layer
2131 return prevLayer;
2132 }
2133 case tflite::ActivationFunctionType_RELU:
2134 {
2135 activationDesc.m_Function = ActivationFunction::ReLu;
2136 layerName += ":RELU";
2137 break;
2138 }
2139 case tflite::ActivationFunctionType_RELU6:
2140 {
2141 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2142 activationDesc.m_A = 6.0f;
2143 activationDesc.m_B = 0.0f;
2144 layerName += ":RELU6";
2145 break;
2146 }
2147 case tflite::ActivationFunctionType_TANH:
2148 {
2149 activationDesc.m_Function = ActivationFunction::TanH;
2150 activationDesc.m_A = 1.0f;
2151 activationDesc.m_B = 1.0f;
2152 layerName += ":TANH";
2153 break;
2154 }
2155
2156 // I only put these here as a reminder what others we could support
2157 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2158 case tflite::ActivationFunctionType_SIGN_BIT:
2159 default:
2160 {
2161 throw ParseException(
2162 boost::str(
2163 boost::format("TfLite parser doesn't suppport fused activation: "
2164 "%1%/%2% %3% ") %
2165 activationType %
2166 tflite::EnumNameActivationFunctionType(activationType) %
2167 CHECK_LOCATION().AsString()));
2168
2169 }
2170 }
2171
2172 IConnectableLayer* activationLayer =
2173 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2174
2175 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2176 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2177 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2178 return activationLayer;
2179}
2180
2181TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2182{
2183 if (fileName == nullptr)
2184 {
2185 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2186 CHECK_LOCATION().AsString()));
2187 }
2188 boost::system::error_code errorCode;
2189 boost::filesystem::path pathToFile(fileName);
2190 if (!boost::filesystem::exists(pathToFile, errorCode))
2191 {
2192 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2193 fileName %
2194 errorCode %
2195 CHECK_LOCATION().AsString()));
2196 }
2197 std::ifstream file(fileName, std::ios::binary);
2198 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2199 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2200 fileContent.size());
2201}
2202
2203TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2204{
2205 if (binaryContent == nullptr)
2206 {
2207 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2208 CHECK_LOCATION().AsString()));
2209 }
2210 flatbuffers::Verifier verifier(binaryContent, len);
2211 if (verifier.VerifyBuffer<tflite::Model>() == false)
2212 {
2213 throw ParseException(
2214 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2215 "flatbuffers format. size:%1% %2%") %
2216 len %
2217 CHECK_LOCATION().AsString()));
2218 }
2219 return tflite::UnPackModel(binaryContent);
2220}
2221
2222TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2223 size_t subgraphIndex,
2224 size_t operatorIndex)
2225{
2226 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2227
Derek Lambertiff05cc52019-04-26 13:05:17 +01002228 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2229 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002230
2231 size_t inputCount = operatorPtr->inputs.size();
2232 TensorRawPtrVector result(inputCount);
2233 for (size_t i=0; i<inputCount; ++i)
2234 {
2235 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002236 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002237 }
2238 return result;
2239}
2240
2241TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2242 size_t subgraphIndex,
2243 size_t operatorIndex)
2244{
2245 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2246
Derek Lambertiff05cc52019-04-26 13:05:17 +01002247 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2248 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002249
2250 size_t outputCount = operatorPtr->outputs.size();
2251 TensorRawPtrVector result(outputCount);
2252 for (size_t i=0; i<outputCount; ++i)
2253 {
2254 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2255 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002256 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002257 }
2258 return result;
2259}
2260
2261TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2262 size_t subgraphIndex)
2263{
2264 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002265 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002266
Derek Lambertiff05cc52019-04-26 13:05:17 +01002267 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002268 TensorIdRawPtrVector result(inputCount);
2269 for (size_t i=0; i<inputCount; ++i)
2270 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002271 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002272 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002273 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002274 }
2275 return result;
2276}
2277
2278TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2279 size_t subgraphIndex)
2280{
2281 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002282 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002283
Derek Lambertiff05cc52019-04-26 13:05:17 +01002284 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002285 TensorIdRawPtrVector result(outputCount);
2286 for (size_t i=0; i<outputCount; ++i)
2287 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002288 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2289 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002290 }
2291 return result;
2292}
2293
2294std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2295 size_t subgraphIndex,
2296 size_t operatorIndex)
2297{
2298 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002299 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2300 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002301 return operatorPtr->inputs;
2302}
2303
2304std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2305 size_t subgraphIndex,
2306 size_t operatorIndex)
2307{
2308 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002309 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2310 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002311 return operatorPtr->outputs;
2312}
2313
2314void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2315 size_t operatorIndex,
2316 IConnectableLayer* layer,
2317 const std::vector<unsigned int>& tensorIndexes)
2318{
2319 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2320 BOOST_ASSERT(layer != nullptr);
2321 if (tensorIndexes.size() != layer->GetNumInputSlots())
2322 {
2323 throw ParseException(
2324 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2325 " for subgraph:%3% operator index:%4% %5%") %
2326 tensorIndexes.size() %
2327 layer->GetNumInputSlots() %
2328 subgraphIndex %
2329 operatorIndex %
2330 CHECK_LOCATION().AsString()));
2331 }
2332
2333 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2334 {
2335 unsigned int tensorIndex = tensorIndexes[slotIndex];
2336 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2337 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2338 }
2339}
2340
2341void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2342 size_t operatorIndex,
2343 IConnectableLayer* layer,
2344 const std::vector<unsigned int>& tensorIndexes)
2345{
2346 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2347 BOOST_ASSERT(layer != nullptr);
2348 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2349 {
2350 throw ParseException(
2351 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2352 " for subgraph:%3% operator index:%4% %5%") %
2353 tensorIndexes.size() %
2354 layer->GetNumOutputSlots() %
2355 subgraphIndex %
2356 operatorIndex %
2357 CHECK_LOCATION().AsString()));
2358 }
2359
2360 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2361 {
2362 unsigned int tensorIndex = tensorIndexes[slotIndex];
2363 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2364 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2365 }
2366}
2367
2368void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2369{
2370 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2371
2372 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2373 for (auto const & tensorIdAndPtr : inputs)
2374 {
2375 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2376 IConnectableLayer* layer =
2377 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2378
2379 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2380 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2381
2382 RegisterOutputSlots(subgraphIndex,
2383 VIRTUAL_OPERATOR_ID,
2384 layer,
2385 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2386 }
2387}
2388
2389void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2390{
2391 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2392
2393 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2394 for (auto const & tensorIdAndPtr : outputs)
2395 {
2396 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2397 IConnectableLayer* layer =
2398 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2399
2400 RegisterInputSlots(subgraphIndex,
2401 VIRTUAL_OPERATOR_ID,
2402 layer,
2403 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2404 }
2405}
2406
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002407void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2408{
2409 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2410
Derek Lambertiff05cc52019-04-26 13:05:17 +01002411 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002412 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2413 {
2414 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2415 {
2416 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2417 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2418 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002419 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002420 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2421 auto tensorAndData = CreateConstTensor(tensorPtr,
2422 tensorInfo,
2423 armnn::Optional<armnn::PermutationVector&>());
2424
2425 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2426 IConnectableLayer *layer =
2427 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2428
2429 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2430 RegisterOutputSlots(subgraphIndex,
2431 VIRTUAL_OPERATOR_ID,
2432 layer,
2433 { tensorIndex });
2434
2435 }
2436 }
2437 }
2438}
2439
telsoa01c577f2c2018-08-31 09:22:23 +01002440// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2441TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2442{
2443 CHECK_BUFFER(model, bufferIndex);
2444 return model->buffers[bufferIndex].get();
2445}
2446
Matteo Martincigh747ef822018-12-18 09:26:39 +00002447template<typename T>
2448std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2449TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2450 TfLiteParser::TensorRawPtr tensorPtr,
2451 armnn::TensorInfo& tensorInfo,
2452 armnn::Optional<armnn::PermutationVector&> permutationVector)
2453{
2454 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2455 tensorPtr,
2456 tensorInfo,
2457 permutationVector);
2458 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2459 return std::make_pair(constData.first, std::move(storage));
2460}
2461
telsoa01c577f2c2018-08-31 09:22:23 +01002462std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2463TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002464 armnn::TensorInfo& tensorInfo,
2465 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002466{
2467 CHECK_TENSOR_PTR(tensorPtr);
2468 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2469 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2470
2471 switch (tensorInfo.GetDataType())
2472 {
2473 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002474 return CreateConstTensorAndStoreData<float>(bufferPtr,
2475 tensorPtr,
2476 tensorInfo,
2477 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002478 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002479 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2480 tensorPtr,
2481 tensorInfo,
2482 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002483 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002484 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2485 tensorPtr,
2486 tensorInfo,
2487 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002488 default:
2489 {
2490 std::stringstream errString;
2491 errString << "Unexpected datatype when creating const tensor: "
2492 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2493 << " shape:" << tensorInfo.GetShape()
2494 << CHECK_LOCATION().AsString();
2495 throw ParseException(errString.str());
2496 }
2497 }
2498}
2499
2500BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2501 const std::string& name) const
2502{
2503 CHECK_SUBGRAPH(m_Model, subgraphId);
2504 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2505 for (auto const & input : inputs)
2506 {
2507 if (input.second->name == name)
2508 {
2509 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2510 return std::make_pair(bindingId, ToTensorInfo(input.second));
2511 }
2512 }
2513
2514 std::stringstream bindings;
2515 for (auto const & input : inputs)
2516 {
2517 bindings << "'" << input.second->name << "' ";
2518 }
2519
2520 throw ParseException(
2521 boost::str(
2522 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2523 "Possible inputs are: [%3%] %4%") %
2524 subgraphId %
2525 name %
2526 bindings.str() %
2527 CHECK_LOCATION().AsString()));
2528}
2529
2530BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2531 const std::string& name) const
2532{
2533 CHECK_SUBGRAPH(m_Model, subgraphId);
2534 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002535 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002536 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002537 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002538 if (output.second->name == name)
2539 {
2540 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002541 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2542 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2543 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002544 }
2545 }
2546
2547 std::stringstream bindings;
2548 for (auto const & output : outputs)
2549 {
2550 bindings << "'" << output.second->name << "' ";
2551 }
2552
2553 throw ParseException(
2554 boost::str(
2555 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2556 "Possible outputs are: [%3%] %4%") %
2557 subgraphId %
2558 name %
2559 bindings.str() %
2560 CHECK_LOCATION().AsString()));
2561}
2562
// Returns the number of subgraphs contained in the loaded model.
size_t TfLiteParser::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}
2567
2568std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2569{
2570 CHECK_SUBGRAPH(m_Model, subgraphId);
2571 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2572 std::vector<std::string> result;
2573 result.reserve(inputs.size());
2574 for (auto const & input : inputs)
2575 {
2576 result.push_back(input.second->name);
2577 }
2578 return result;
2579}
2580
2581std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2582{
2583 CHECK_SUBGRAPH(m_Model, subgraphId);
2584 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2585 std::vector<std::string> result;
2586 result.reserve(outputs.size());
2587 for (auto const & output : outputs)
2588 {
2589 result.push_back(output.second->name);
2590 }
2591 return result;
2592}
2593
// Factory: creates a parser instance owned by the caller; pair with
// ITfLiteParser::Destroy to release it.
ITfLiteParser* ITfLiteParser::CreateRaw()
{
    return new TfLiteParser();
}
2598
// Factory: creates a parser wrapped in a smart pointer that releases it via
// ITfLiteParser::Destroy.
ITfLiteParserPtr ITfLiteParser::Create()
{
    return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
}
2603
// Deletes a parser previously obtained from CreateRaw()/Create().
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
2608
// Takes ownership of a float buffer; the other typed members stay null.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int32Data(nullptr)
{
}
2615
// Takes ownership of a uint8 buffer; the other typed members stay null.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int32Data(nullptr)
{
}
2622
// Takes ownership of an int32 buffer; the other typed members stay null.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
2629
2630} // armnnTfLiteParser