telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5#include "TfLiteParser.hpp"
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Exceptions.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <boost/filesystem.hpp>
11
12// armnnUtils:
Sadik Armagan479045b2018-10-01 11:51:37 +010013#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010014#include <Permute.hpp>
15#include <VerificationHelpers.hpp>
16
17// The generated code based on the Tf Lite schema:
18#include <schema_generated.h>
19
20#include <boost/core/ignore_unused.hpp>
21#include <boost/assert.hpp>
22#include <boost/format.hpp>
23#include <boost/log/trivial.hpp>
25#include <boost/numeric/conversion/cast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026
27#include <fstream>
28#include <algorithm>
29#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010030#include <numeric>
keidav011b3e2ea2019-02-21 10:07:37 +000031#include <flatbuffers/flexbuffers.h>
telsoa01c577f2c2018-08-31 09:22:23 +010032
33using namespace armnn;
34using armnn::CheckLocation;
35namespace armnnTfLiteParser
36{
37namespace
38{
jimfly01c25411c2018-11-14 17:47:22 +000039
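// Sentinel operator index: validation helpers that are called without a specific
// operator (see CheckModel below) pass this value so the operator bound check is skipped.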
telsoa01c577f2c2018-08-31 09:22:23 +010040const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
41
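// Validation helpers. Each Check* function throws a ParseException whose message includes
// the caller's source location, captured by the corresponding CHECK_* macro via
// CHECK_LOCATION(), so parse errors point back at the offending call site.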
42void CheckSubgraph(const TfLiteParser::ModelPtr & model,
43 size_t subgraphIndex,
44 const CheckLocation & location)
45{
46 if (model.get() == nullptr)
47 {
48 throw ParseException(
49 boost::str(
50 boost::format("%1% was called with invalid (null) model. "
51 "Possible reason is that the model is not yet loaded and Unpack(ed). "
52 "subgraph:%2% at %3%") %
53 location.m_Function %
54 subgraphIndex %
55 location.FileLine()));
56 }
57 else if (subgraphIndex >= model->subgraphs.size())
58 {
59 throw ParseException(
60 boost::str(
61 boost::format("%1% was called with an invalid subgraph index. "
62 "subgraph:%2% at %3%") %
63 location.m_Function %
64 subgraphIndex %
65 location.FileLine()));
66 }
67}
68
69#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
70 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
71
72void CheckModel(const TfLiteParser::ModelPtr & model,
73 size_t subgraphIndex,
74 size_t operatorIndex,
75 const CheckLocation & location)
76{
77 if (model.get() == nullptr)
78 {
79 throw ParseException(
80 boost::str(
81 boost::format("%1% was called with invalid (null) model. "
82 "Possible reason is that the model is not yet loaded and Unpack(ed). "
83 "subgraph:%2% operator:%3% at %4%") %
84 location.m_Function %
85 subgraphIndex %
86 operatorIndex %
87 location.FileLine()));
88 }
89 else if (subgraphIndex >= model->subgraphs.size())
90 {
91 throw ParseException(
92 boost::str(
93 boost::format("%1% was called with an invalid subgraph index. "
94 "subgraph:%2% operator:%3% at %4%") %
95 location.m_Function %
96 subgraphIndex %
97 operatorIndex %
98 location.FileLine()));
99 }
100 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
101 operatorIndex != VIRTUAL_OPERATOR_ID)
102 {
103 throw ParseException(
104 boost::str(
105 boost::format("%1% was called with an invalid operator index. "
106 "subgraph:%2% operator:%3% at %4%") %
107 location.m_Function %
108 subgraphIndex %
109 operatorIndex %
110 location.FileLine()));
111 }
112}
113
114#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
115 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
116
117void CheckTensor(const TfLiteParser::ModelPtr & model,
118 size_t subgraphIndex,
119 size_t tensorIndex,
120 const CheckLocation & location)
121{
    // The model itself is not checked here because CHECK_MODEL is assumed to have
    // run already; an assert is sufficient.
124 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
125
    // Likewise the subgraph index should already have been validated by CHECK_MODEL,
    // so only an assert is added here.
128 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
129
130 // the tensor index is the only one to check here
131 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
132 {
133 throw ParseException(
134 boost::str(
135 boost::format("%1% was called with an invalid tensor index. "
136 "subgraph:%2% tensor:%3% at %4%") %
137 location.m_Function %
138 subgraphIndex %
139 tensorIndex %
140 location.FileLine()));
141 }
142}
143
144#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
145 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
146
147void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
148 const CheckLocation & location)
149{
150 if (rawPtr == nullptr)
151 {
152 throw ParseException(
153 boost::str(
154 boost::format("%1% was called with a null tensor pointer. "
155 "at %2%") %
156 location.m_Function %
157 location.FileLine()));
158
159 }
160}
161
162#define CHECK_TENSOR_PTR(TENSOR_PTR) \
163 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
164
165void CheckBuffer(const TfLiteParser::ModelPtr & model,
166 size_t bufferIndex,
167 const CheckLocation & location)
168{
169 if (model.get() == nullptr)
170 {
171 throw ParseException(
172 boost::str(
173 boost::format("%1% was called with invalid (null) model. "
174 "Possible reason is that the model is not yet loaded and Unpack(ed). "
175 "buffer:%2% at %3%") %
176 location.m_Function %
177 bufferIndex %
178 location.FileLine()));
179 }
180 else if (bufferIndex >= model->buffers.size())
181 {
182 throw ParseException(
183 boost::str(
184 boost::format("%1% was called with an invalid buffer index. "
185 "buffer index:%2% at %3%") %
186 location.m_Function %
187 bufferIndex %
188 location.FileLine()));
189 }
190 else if (model->buffers[bufferIndex].get() == nullptr)
191 {
192 throw ParseException(
193 boost::str(
                    boost::format("The buffer #%1% is null. %2%") %
195 bufferIndex %
196 location.AsString()));
197 }
198}
199
200#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
201 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
202
203void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
204 const armnn::TensorInfo & tensorInfo,
205 uint32_t bufferId,
206 const CheckLocation & location)
207{
208 if (bufferPtr == nullptr)
209 {
210 throw ParseException(
211 boost::str(
212 boost::format("BufferPtr is null for buffer:%1%. %2%") %
213 bufferId %
214 location.AsString()));
215 }
216 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
217 tensorInfo.GetNumBytes() > bufferPtr->data.size())
218 {
219 std::stringstream ss;
220 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
221 << "For tensor: " << tensorInfo.GetShape()
222 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
223 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
224 throw ParseException(ss.str());
225 }
226}
227
228#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
229 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
230
231bool IsActivationSupported(tflite::ActivationFunctionType activationType)
232{
233 switch(activationType)
234 {
235 case tflite::ActivationFunctionType_NONE:
236 case tflite::ActivationFunctionType_RELU:
237 case tflite::ActivationFunctionType_RELU6:
238 case tflite::ActivationFunctionType_TANH:
239 {
240 return true;
241 }
242 default:
243 {
244 return false;
245 }
246 }
247}
248
249#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
250 do { \
251 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
252 { \
253 throw ParseException( \
254 boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
256 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
257 OPTION->fused_activation_function % \
258 tflite::EnumNameActivationFunctionType(\
259 OPTION->fused_activation_function) % \
260 __func__ % \
261 SUBGRAPH_INDEX % \
262 OPERATOR_INDEX % \
263 CHECK_LOCATION().FileLine())); \
264 } \
265 } while(false)
266
267
268std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
269{
270 std::vector<unsigned int> result;
271 result.reserve(in.size());
272 for (auto & i : in)
273 {
274 result.push_back(CHECKED_NON_NEGATIVE(i));
275 }
276 return result;
277}
278
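// Computes the implicit padding for one spatial dimension, following TfLite's padding
// scheme: VALID leaves both paddings at zero, while SAME pads so that
// outputSize == ceil(inputSize / stride), with any odd padding pixel going to the back.
// Worked example of the arithmetic below: inputSize=5, filterSize=3, stride=2, dilation=1
// gives outputSize=3, total padding (3-1)*2 + 3 - 5 = 2, i.e. paddingFront=1, paddingBack=1.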
279void CalcPadding(uint32_t inputSize,
280 uint32_t filterSize,
281 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100282 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100283 uint32_t& paddingFront,
284 uint32_t& paddingBack,
285 tflite::Padding padding)
286{
287 paddingFront = 0;
288 paddingBack = 0;
289 if (padding == tflite::Padding_SAME)
290 {
291 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100292 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
293 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100294 if (temp > inputSize)
295 {
296 paddingFront = (temp - inputSize) / 2;
297 paddingBack = (temp - inputSize) - paddingFront;
298 }
299 }
300}
301
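// Converts a TfLite tensor description into an armnn::TensorInfo: maps the TfLite element
// type to its ArmNN equivalent, copies the per-tensor quantisation scale and zero point
// when present, and represents a rank-0 (scalar) shape as { 1 }.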
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000302armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
telsoa01c577f2c2018-08-31 09:22:23 +0100303{
304 armnn::DataType type;
305 CHECK_TENSOR_PTR(tensorPtr);
306
307 switch (tensorPtr->type)
308 {
309 case tflite::TensorType_UINT8:
310 type = armnn::DataType::QuantisedAsymm8;
311 break;
312 case tflite::TensorType_FLOAT32:
313 type = armnn::DataType::Float32;
314 break;
315 case tflite::TensorType_INT32:
316 type = armnn::DataType::Signed32;
317 break;
318
319 default:
320 {
321 CheckLocation location = CHECK_LOCATION();
322 throw ParseException(
323 boost::str(
324 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
325 tensorPtr->type %
326 tflite::EnumNameTensorType(tensorPtr->type) %
327 tensorPtr->name %
328 location.AsString()));
329 }
330 }
331
332 float quantizationScale = 0.0f;
333 int32_t quantizationOffset = 0;
334
335 if (tensorPtr->quantization.get())
336 {
337 CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
338 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
339
340 if (tensorPtr->quantization->scale.size() == 1)
341 {
342 quantizationScale = tensorPtr->quantization->scale[0];
343 }
344 if (tensorPtr->quantization->zero_point.size() == 1)
345 {
            // NOTE: precision is lost here when converting from 64-bit to 32-bit,
            // but this is what ArmNN supports at the moment.
348 quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
349 }
350 }
351
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100352 std::vector<unsigned int> safeShape = shapes;
353 if (safeShape.size() == 0)
354 {
355 safeShape.push_back(1);
356 }
357
telsoa01c577f2c2018-08-31 09:22:23 +0100358 // two statements (on purpose) for easier debugging:
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100359 armnn::TensorInfo result(static_cast<unsigned int>(safeShape.size()),
360 safeShape.data(),
telsoa01c577f2c2018-08-31 09:22:23 +0100361 type,
362 quantizationScale,
363 quantizationOffset);
364 return result;
365}
366
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000367armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
368{
369 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
370 return ToTensorInfo(tensorPtr, dimensions);
371}
372
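// Copies a constant TfLite buffer into freshly allocated memory and wraps it in an
// armnn::ConstTensor, optionally permuting the data (e.g. to reorder convolution weights)
// when a non-empty permutation vector is given. The returned unique_ptr owns the copied
// data and must be kept alive for as long as the ConstTensor is used.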
telsoa01c577f2c2018-08-31 09:22:23 +0100373template<typename T>
374std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
375CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
376 TfLiteParser::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000377 armnn::TensorInfo& tensorInfo,
378 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100379{
380 BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
381 BOOST_ASSERT_MSG(bufferPtr != nullptr,
382 boost::str(
383 boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
384
385 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000386
387 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
388 {
389 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000390 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
391 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000392 }
393 else
394 {
395 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
396 }
397
telsoa01c577f2c2018-08-31 09:22:23 +0100398 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
399}
400
telsoa01c577f2c2018-08-31 09:22:23 +0100401armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
402{
    // Generate the binding id by shifting the tensor id left by 8 bits and adding
    // the subgraph id, which allows up to 256 subgraphs.
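    // For example, tensor 5 in subgraph 0 maps to binding id (5 << 8) + 0 = 1280.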
405 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
406}
407
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000408bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
409{
410 const unsigned int actualSize = actual.GetNumDimensions();
411 if (actualSize != expected.size())
412 {
413 return false;
414 }
415
416 for (unsigned int i = 0u; i < actualSize; i++)
417 {
418 if (expected[i] < 0 ||
419 actual[i] != static_cast<unsigned int>(expected[i]))
420 {
421 return false;
422 }
423 }
424
425 return true;
426}
427
telsoa01c577f2c2018-08-31 09:22:23 +0100428} // <anonymous>
429
430TfLiteParser::TfLiteParser()
431: m_Network(nullptr, nullptr)
432, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
433{
434 // register supported operators
435 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
Bruno Goncalvesdb947e22019-02-08 18:52:21 -0200436 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
Sadik Armagan479045b2018-10-01 11:51:37 +0100437 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
telsoa01c577f2c2018-08-31 09:22:23 +0100438 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
439 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
keidav011b3e2ea2019-02-21 10:07:37 +0000440 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
Sadik Armagan8853c1f2018-10-22 09:04:18 +0100441 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
Finn Williamsc42c3842019-01-22 14:18:11 +0000442 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
Matthew Jackson28c94572019-07-18 10:47:03 +0100443 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100444 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -0200445 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -0200446 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
Sadik Armagan58f39192018-09-17 14:14:39 +0100447 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
448 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
Sadikb94967b2018-09-19 15:30:00 +0100449 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -0200450 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
Sadik Armagan479045b2018-10-01 11:51:37 +0100451 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
Bruno Goncalvesbaded142019-02-08 19:02:48 -0200452 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
Sadik Armagan479045b2018-10-01 11:51:37 +0100453 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
Bruno Goncalves451d95b2019-02-12 22:59:22 -0200454 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
Bruno Goncalvesbbeae262019-02-07 18:37:39 -0200455 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -0200456 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
Bruno Goncalvesf803f782018-12-18 13:40:30 -0200457 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
Bruno Goncalves2235cee2018-12-19 12:51:45 -0200458 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
Matthew Jacksonbcca1f42019-07-16 11:39:21 +0100459 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
Bruno Goncalves6c2355b2018-12-19 12:52:01 -0200460 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
Nina Drozd0324f482019-04-08 10:52:10 +0100461 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
Nina Drozd99851762019-04-09 09:37:38 +0100462 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100463 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
Nina Drozd200e3802019-04-15 09:47:39 +0100464 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
telsoa01c577f2c2018-08-31 09:22:23 +0100465}
466
467void TfLiteParser::ResetParser()
468{
469 m_Network = armnn::INetworkPtr(nullptr, nullptr);
470 m_Model = nullptr;
471 m_SubgraphConnections.clear();
472}
473
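// Handles broadcasting for binary operators whose two inputs have different ranks: the
// lower-rank input is routed through a Reshape layer that pads its shape with leading 1s
// to match the higher-rank input. For example, combining tensors of shape [2,3,4] and
// [3,4] reshapes the second operand to [1,3,4] before it is connected to the given layer.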
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200474void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
475 size_t operatorIndex,
476 IConnectableLayer *layer)
477{
478 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
479 BOOST_ASSERT(layer != nullptr);
480
Derek Lambertiff05cc52019-04-26 13:05:17 +0100481 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
482 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200483
484 BOOST_ASSERT(operatorPtr->inputs.size() > 1);
485
486 uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
Derek Lambertiff05cc52019-04-26 13:05:17 +0100487 TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200488 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
Derek Lambertiff05cc52019-04-26 13:05:17 +0100489 TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200490
491 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
492 armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);
493
494 if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
495 {
496 uint32_t id = reshapedInputId;
497 reshapedInputId = inputId;
498 inputId = id;
499
500 reshapedTensorInfo = ToTensorInfo(tensorPtr1);
501 inputTensorInfo = ToTensorInfo(tensorPtr);
502 }
503
504 uint32_t numDimensions = inputTensorInfo.GetNumDimensions();
505
506 std::vector<unsigned> reshapedDim;
507 for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
508 {
509 reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
510 }
511
512 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
513 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
514
515 reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
516
517 std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
518 armnn::ReshapeDescriptor desc;
519 desc.m_TargetShape = reshapedTensorInfo.GetShape();
520 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
521
522 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
523 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
524
525 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
526
527 armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
528 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
529}
530
telsoa01c577f2c2018-08-31 09:22:23 +0100531INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
532{
533 ResetParser();
534 m_Model = LoadModelFromFile(graphFile);
535 return CreateNetworkFromModel();
536}
537
538INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
539{
540 ResetParser();
541 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
542 return CreateNetworkFromModel();
543}
544
545INetworkPtr TfLiteParser::CreateNetworkFromModel()
546{
547 m_Network = INetwork::Create();
548 BOOST_ASSERT(m_Model.get() != nullptr);
549
550 bool failedToCreate = false;
551 std::stringstream errors;
552
553 if (m_Model->subgraphs.size() != 1)
554 {
555 throw ParseException(
556 boost::str(
557 boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
558 m_Model->subgraphs.size() %
559 CHECK_LOCATION().AsString()));
560 }
561
562 size_t subgraphIndex = 0;
Derek Lambertiff05cc52019-04-26 13:05:17 +0100563 for (SubgraphPtr const & subgraph : m_Model->subgraphs)
telsoa01c577f2c2018-08-31 09:22:23 +0100564 {
565 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
566
567 size_t operatorIndex = 0;
568 for (OperatorPtr const & op : subgraph->operators)
569 {
570 try
571 {
telsoa01c577f2c2018-08-31 09:22:23 +0100572 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
573 auto builtinCode = opCodePtr->builtin_code;
574
575 if (builtinCode > tflite::BuiltinOperator_MAX)
576 {
577 throw ParseException(
578 boost::str(
579 boost::format("Operator code %1% is out of range 0-%2%. "
580 "subgraph:%3% operator idx:%4%. %5%") %
581 builtinCode %
582 tflite::BuiltinOperator_MAX %
583 subgraphIndex %
584 operatorIndex %
585 CHECK_LOCATION().AsString()));
586 }
587
588 // lookup and call the parser function
589 auto & parserFunction = m_ParserFunctions[builtinCode];
590 (this->*parserFunction)(subgraphIndex, operatorIndex);
591 }
592 catch (const ParseException& e)
593 {
594 failedToCreate = true;
595 std::stringstream errorString;
596
597 errorString << "Failed to parse operator #" << operatorIndex
598 << " within subgraph #" << subgraphIndex
599 << " error: " << e.what();
600 BOOST_LOG_TRIVIAL(error) << errorString.str();
601
602 errors << errorString.str() << "\n";
603 }
604 ++operatorIndex;
605 }
606
607 SetupInputLayers(subgraphIndex);
608 SetupOutputLayers(subgraphIndex);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -0200609 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100610
611 ++subgraphIndex;
612 }
613
614 if (failedToCreate)
615 {
616 // we can skip everything and let the outer exception handler deal with the error
617 throw ParseException(errors.str());
618 }
619
620 // establish the connections from the layer outputs to the inputs of the subsequent layers
621 for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
622 {
623 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
624 {
625 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
626 {
627 for (size_t inputSlotIdx = 0;
628 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
629 ++inputSlotIdx)
630 {
631 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
632 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
633 }
634 }
635 }
636 }
637
638 return std::move(m_Network);
639}
640
641void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
642 size_t tensorIndex,
643 armnn::IOutputSlot* slot)
644{
645 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
646 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
647 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
648
649 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
650
651 // assuming there is only one producer for that tensor
652 if (tensorSlots.outputSlot != nullptr)
653 {
654 throw ParseException(boost::str(
655 boost::format("Another layer has already registered itself as the producer of "
656 "subgraph:%1% tensor:%2% %3%") %
657 subgraphIndex %
658 tensorIndex %
659 CHECK_LOCATION().AsString()));
660 }
661
662 tensorSlots.outputSlot = slot;
663}
664
665void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
666 size_t tensorIndex,
667 armnn::IInputSlot* slot)
668{
669 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
670 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
671 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
672
673 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
674 tensorSlots.inputSlots.push_back(slot);
675}
676
677void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
678{
679 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
680 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
681 //
682 auto opcodeIndex = operatorPtr->opcode_index;
683 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
684
685 throw ParseException(
686 boost::str(
687 boost::format("Operator not supported. "
688 "subgraph:%1% operator:%2% "
689 "opcode_index:%3% opcode:%4% / %5% %6%") %
690 subgraphIndex %
691 operatorIndex %
692 opcodeIndex %
693 opcode %
694 tflite::EnumNameBuiltinOperator(opcode) %
695 CHECK_LOCATION().AsString()));
696}
697
telsoa01c577f2c2018-08-31 09:22:23 +0100698void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
699{
700 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
701
702 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
703 const auto * options = operatorPtr->builtin_options.AsConv2DOptions();
704
705 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
706
707 Convolution2dDescriptor desc;
708 desc.m_BiasEnabled = false;
709 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
710 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000711 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100712 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
713 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000714
telsoa01c577f2c2018-08-31 09:22:23 +0100715 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
716 CHECK_VALID_SIZE(inputs.size(), 2, 3);
717
718 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
719 CHECK_VALID_SIZE(outputs.size(), 1);
720
721 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
722 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
723
724 // assuming input is NHWC
725 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
726 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
727
728 // assuming the filter is OHWI : Output, H, W, Input
729 // which is essentially the same as NHWC
730 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
731 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
732
Pablo Tellof0bd6832019-04-26 17:58:13 +0100733 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
734 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
735 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
736 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100737
Matteo Martincigh747ef822018-12-18 09:26:39 +0000738 auto filterTensorAndData = CreateConstTensor(inputs[1],
739 filterTensorInfo,
740 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100741 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100742
743 auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
744
745 if (inputs.size() == 3)
746 {
747 desc.m_BiasEnabled = true;
748 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000749 auto biasTensorAndData = CreateConstTensor(inputs[2],
750 biasTensorInfo,
751 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100752 layer = m_Network->AddConvolution2dLayer(desc,
753 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100754 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100755 layerName.c_str());
756 }
757 else
758 {
759 layer = m_Network->AddConvolution2dLayer(desc,
760 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100761 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100762 layerName.c_str());
763 }
764
765 BOOST_ASSERT(layer != nullptr);
766
telsoa01c577f2c2018-08-31 09:22:23 +0100767 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000768 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100769
770 // register the input connection slots for the layer, connections are made after all layers have been created
771 // only the tensors for the inputs are relevant, exclude the const tensors
772 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000773 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100774
jimfly01c25411c2018-11-14 17:47:22 +0000775 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100776 // register the output connection slots for the layer, connections are made after all layers have been created
777 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
778 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
779}
780
781void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
782{
783 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
784
785 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
786 const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
787
788 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
789
790 DepthwiseConvolution2dDescriptor desc;
791 desc.m_BiasEnabled = false;
792 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
793 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000794 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matthew Jacksond6a9dee2019-07-22 13:53:24 +0100795 CHECKED_NON_NEGATIVE(options->depth_multiplier);
telsoa01c577f2c2018-08-31 09:22:23 +0100796
797 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
798 CHECK_VALID_SIZE(inputs.size(), 2, 3);
799 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
800 CHECK_VALID_SIZE(outputs.size(), 1);
Pablo Tellof0bd6832019-04-26 17:58:13 +0100801 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
802 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000803
telsoa01c577f2c2018-08-31 09:22:23 +0100804 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
805 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
806
Matteo Martincigh747ef822018-12-18 09:26:39 +0000807 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +0100808 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
809 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000810
811 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +0100812 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
813 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
814
Matteo Martincigh747ef822018-12-18 09:26:39 +0000815 // Reshape weights as [ H, W, I, M ]
816 filterTensorInfo.SetShape({ filterHeight,
817 filterWidth,
818 inputTensorInfo.GetShape()[3],
819 filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });
820
821 // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
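    // Here each entry i of the permutation vector gives the destination index of source
    // dimension i, i.e. { 2, 3, 1, 0 } sends H->2, W->3, I->1, M->0, which matches the
    // [H, W, I, M] -> [M, I, H, W] mapping noted on the next line.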
822 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
823
Pablo Tellof0bd6832019-04-26 17:58:13 +0100824 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
825 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
826 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
827 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100828
Matteo Martincigh747ef822018-12-18 09:26:39 +0000829 auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100830 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100831 auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
832
833 if (inputs.size() == 3)
834 {
835 desc.m_BiasEnabled = true;
836 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000837 auto biasTensorAndData = CreateConstTensor(inputs[2],
838 biasTensorInfo,
839 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100840 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
841 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100842 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100843 layerName.c_str());
844 }
845 else
846 {
847 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
848 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100849 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100850 layerName.c_str());
851 }
852 BOOST_ASSERT(layer != nullptr);
853
telsoa01c577f2c2018-08-31 09:22:23 +0100854 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000855 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100856
857 // register the input connection slots for the layer, connections are made after all layers have been created
858 // only the tensors for the inputs are relevant, exclude the const tensors
859 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000860 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100861
jimfly01c25411c2018-11-14 17:47:22 +0000862 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100863 // register the output connection slots for the layer, connections are made after all layers have been created
864 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
865 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
866}
867
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100868void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
869{
870 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
871
872 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
873 const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();
874
875 TransposeConvolution2dDescriptor desc;
876 desc.m_BiasEnabled = false;
877 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
878 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
879 desc.m_DataLayout = armnn::DataLayout::NHWC;
880
881 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
882 CHECK_VALID_SIZE(inputs.size(), 2, 3);
883
884 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
885 CHECK_VALID_SIZE(outputs.size(), 1);
886
887 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
888 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
889
890 // TfLite uses NHWC tensors
891 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
892 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
893
894 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
895 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
896
897 CalcPadding(inputHeight,
898 filterHeight,
899 desc.m_StrideY,
900 1, // DilationY
901 desc.m_PadTop,
902 desc.m_PadBottom,
903 options->padding);
904
905 CalcPadding(inputWidth,
906 filterWidth,
907 desc.m_StrideX,
908 1, // DilationX
909 desc.m_PadLeft,
910 desc.m_PadRight,
911 options->padding);
912
913 auto filterTensorAndData = CreateConstTensor(inputs[1],
914 filterTensorInfo,
915 armnn::Optional<armnn::PermutationVector&>());
916
917 armnn::IConnectableLayer* layer = nullptr;
918 auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);
919
920 if (inputs.size() == 3)
921 {
922 desc.m_BiasEnabled = true;
923 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
924 auto biasTensorAndData = CreateConstTensor(inputs[2],
925 biasTensorInfo,
926 armnn::Optional<armnn::PermutationVector&>());
927 layer = m_Network->AddTransposeConvolution2dLayer(desc,
928 filterTensorAndData.first,
929 Optional<ConstTensor>(biasTensorAndData.first),
930 layerName.c_str());
931 }
932 else
933 {
934 layer = m_Network->AddTransposeConvolution2dLayer(desc,
935 filterTensorAndData.first,
936 EmptyOptional(),
937 layerName.c_str());
938 }
939
940 BOOST_ASSERT(layer != nullptr);
941
942 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
943 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
944
945 // only the tensors for the inputs are relevant, exclude the const (filter) tensor
946 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
947 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
948
949 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
950 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
951}
952
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100953void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
954{
955 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
956}
957
Bruno Goncalvesdb947e22019-02-08 18:52:21 -0200958void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
959{
960 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
961
962 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
963 CHECK_VALID_SIZE(inputs.size(), 3);
964
965 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
966 CHECK_VALID_SIZE(outputs.size(), 1);
967
968 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
969 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
970
971 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
972 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
973
974 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
975 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
976
977 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
978 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
979
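    // The crops buffer holds (begin, end) pairs, one pair per spatial dimension;
    // e.g. a flattened buffer of [0, 0, 2, 2] becomes the pairs (0, 0) and (2, 2).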
980 size_t step = 2;
981 std::vector<std::pair<unsigned int, unsigned int>> crops;
982 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
983 {
984 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
985 }
986
987 armnn::BatchToSpaceNdDescriptor desc;
988 desc.m_BlockShape = blockShape;
989 desc.m_Crops = crops;
990 desc.m_DataLayout = armnn::DataLayout::NHWC;
991
992 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
993
994 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
995 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
996
997 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
998
999 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1000 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1001
1002 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1003 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1004}
1005
Matthew Jackson28c94572019-07-18 10:47:03 +01001006void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1007{
1008 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1009
1010 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1011 CHECK_VALID_SIZE(inputs.size(), 1);
1012
1013 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1014 CHECK_VALID_SIZE(outputs.size(), 1);
1015
1016 L2NormalizationDescriptor desc;
1017 desc.m_DataLayout = armnn::DataLayout::NHWC;
1018 auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1019 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1020
1021 BOOST_ASSERT(layer != nullptr);
1022
1023 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1024 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1025
1026 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1027 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1028
1029 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1030 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1031}
1032
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001033void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
1034{
1035 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
1036}
1037
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001038void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1039{
1040 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1041
1042 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1043 CHECK_VALID_SIZE(inputs.size(), 2);
1044
1045 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1046 CHECK_VALID_SIZE(outputs.size(), 1);
1047
1048 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1049 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1050
1051 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1052 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1053
1054 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1055 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1056
1057 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1058 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1059 {
1060 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1061 }
1062 else
1063 {
1064 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1065 }
1066
1067 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1068 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1069}
1070
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001071void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1072{
1073 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1074
1075 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1076 CHECK_VALID_SIZE(inputs.size(), 2);
1077
1078 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1079 CHECK_VALID_SIZE(outputs.size(), 1);
1080
1081 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1082 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1083
1084 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1085 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1086
1087 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1088 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1089
1090 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1091 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1092 {
1093 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1094 }
1095 else
1096 {
1097 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1098 }
1099
1100 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1101 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1102}
1103
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001104void TfLiteParser::ParsePool(size_t subgraphIndex,
1105 size_t operatorIndex,
1106 PoolingAlgorithm algorithm)
1107{
1108 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1109
1110 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1111 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1112
1113 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1114
1115 std::string layerName;
1116
1117 switch (algorithm)
1118 {
1119 case PoolingAlgorithm::Average:
1120 layerName =
1121 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1122 break;
1123 case PoolingAlgorithm::Max:
1124 layerName =
1125 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1126 break;
1127 default:
1128 BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
1129 }
1130
1131 Pooling2dDescriptor desc;
1132
1133 desc.m_PoolType = algorithm;
1134 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1135 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1136 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1137 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1138 desc.m_PaddingMethod = PaddingMethod::Exclude;
1139 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001140 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001141
1142 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1143 CHECK_VALID_SIZE(inputs.size(), 1);
1144 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1145
1146 // assuming input is NHWC
1147 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1148 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1149
Pablo Tellof0bd6832019-04-26 17:58:13 +01001150 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1151 desc.m_PadTop, desc.m_PadBottom, options->padding);
1152 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1153 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001154
1155 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1156 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001157
1158 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1159
1160 BOOST_ASSERT(layer != nullptr);
1161
jimfly01c25411c2018-11-14 17:47:22 +00001162 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1163 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001164
1165 // register the input connection slots for the layer, connections are made after all layers have been created
1166 // only the tensors for the inputs are relevant, exclude the const tensors
1167 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001168 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001169
jimfly01c25411c2018-11-14 17:47:22 +00001170 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001171 // register the output connection slots for the layer, connections are made after all layers have been created
1172 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1173 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1174}
1175
telsoa01c577f2c2018-08-31 09:22:23 +01001176void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1177{
1178 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1179 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1180 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1181
1182 SoftmaxDescriptor desc;
1183 desc.m_Beta = options->beta;
1184
1185 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1186 CHECK_VALID_SIZE(inputs.size(), 1);
1187 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1188 CHECK_VALID_SIZE(outputs.size(), 1);
1189
1190 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1191 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1192
1193 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1194 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1195
1196 // register the input connection slots for the layer, connections are made after all layers have been created
1197 // only the tensors for the inputs are relevant, exclude the const tensors
1198 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1199 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1200
1201 // register the output connection slots for the layer, connections are made after all layers have been created
1202 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1203 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1204}
1205
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001206void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1207{
1208 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1209
1210 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1211 CHECK_VALID_SIZE(inputs.size(), 3);
1212
1213 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1214 CHECK_VALID_SIZE(outputs.size(), 1);
1215
1216 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1217 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1218
1219 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1220 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1221
1222 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1223 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1224
1225 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1226 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1227
1228 size_t step = 2;
1229 std::vector<std::pair<unsigned int, unsigned int>> padList;
1230 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1231 {
1232 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1233 }
1234
1235 armnn::SpaceToBatchNdDescriptor desc;
1236 desc.m_BlockShape = blockShape;
1237 desc.m_PadList = padList;
1238 desc.m_DataLayout = armnn::DataLayout::NHWC;
1239
1240 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1241
1242 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1243 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1244
1245 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1246
1247 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1248 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1249
1250 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1251 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1252}
1253
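// Computes the output shape of a Squeeze: dimensions listed in squeezeDimsIn (or every
// dimension, when the list is empty) are removed if their size is 1; all other dimensions
// are kept. For example, squeezing shape [1, 2, 1, 3] with an empty squeeze_dims list
// yields [2, 3], while squeeze_dims = { 0 } yields [2, 1, 3].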
telsoa01c577f2c2018-08-31 09:22:23 +01001254armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1255 const armnn::TensorInfo & inputTensorInfo)
1256{
1257 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1258 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1259 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1260
1261 if (inputTensorInfo.GetNumDimensions() > 4)
1262 {
1263 std::stringstream ss;
1264 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1265 << " shape:" << inputTensorInfo.GetShape() << " "
1266 << CHECK_LOCATION().AsString();
1267 throw ParseException(ss.str());
1268 }
1269
1270 if (squeezeDims.empty())
1271 {
1272 squeezeDims.assign(dimensionSequence,
1273 dimensionSequence+inputTensorInfo.GetNumDimensions());
1274 }
1275
1276 std::vector<uint32_t> outputDims;
1277 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1278 {
1279 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1280 auto currentDimension = inputTensorInfo.GetShape()[i];
1281 if (skipSqueeze || currentDimension != 1)
1282 {
1283 outputDims.push_back(currentDimension);
1284 }
1285 }
1286
1287 if (outputDims.size() > 4)
1288 {
1289 std::stringstream ss;
        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
        << " (input shape:" << inputTensorInfo.GetShape() << ") "
1292 << CHECK_LOCATION().AsString();
1293 throw ParseException(ss.str());
1294 }
1295
1296 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1297 outputDims.data());
1298
1299 // we need to preserve the tensor type and the quantization data as well
1300 TensorInfo outTensorInfo = inputTensorInfo;
1301 outTensorInfo.SetShape(outShape);
1302
1303 return outTensorInfo;
1304}
1305
1306void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1307{
1308 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1309
1310 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1311 CHECK_VALID_SIZE(inputs.size(), 1);
1312
1313 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1314 CHECK_VALID_SIZE(outputs.size(), 1);
1315
1316 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1317 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1318
1319 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1320 armnn::TensorInfo outputTensorInfo =
1321 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1322 inputTensorInfo);
1323
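// Squeeze is lowered here to a Reshape layer whose target shape is the squeezed
// output shape computed above; no dedicated Squeeze layer is created.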
1324 ReshapeDescriptor reshapeDesc;
1325 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1326
1327 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1328 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1329 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1330
1331 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1332 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1333
1334 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1335 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1336}
1337
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001338void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1339{
1340 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1341
1342 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1343 CHECK_VALID_SIZE(inputs.size(), 4);
1344
1345 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1346 CHECK_VALID_SIZE(outputs.size(), 1);
1347
1348 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1349 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1350
1351 StridedSliceDescriptor desc;
1352 desc.m_BeginMask = options->begin_mask;
1353 desc.m_EllipsisMask = options->ellipsis_mask;
1354 desc.m_EndMask = options->end_mask;
1355 desc.m_NewAxisMask = options->new_axis_mask;
1356 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1357 desc.m_DataLayout = armnn::DataLayout::NHWC;
1358
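// The begin, end and stride inputs (inputs 1-3) are read as constant int32 tensors and
// copied straight into the descriptor; they are not registered as layer inputs below.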
1359 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1360 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1361
1362 std::vector<int> begin(beginTensorInfo.GetNumElements());
1363 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1364
1365 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1366 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1367
1368 std::vector<int> end(endTensorInfo.GetNumElements());
1369 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1370
1371 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1372 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1373
1374 std::vector<int> stride(strideTensorInfo.GetNumElements());
1375 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1376
1377 desc.m_Begin = begin;
1378 desc.m_End = end;
1379 desc.m_Stride = stride;
1380
1381 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1382 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1383
1384 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1385 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1386
1387 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1388 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1389
1390 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1391 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1392}
1393
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001394void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1395{
1396 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1397
1398 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1399 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1400
1401 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1402 CHECK_VALID_SIZE(inputs.size(), 2);
1403
1404 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1405 CHECK_VALID_SIZE(outputs.size(), 1);
1406
1407 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1408 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1409
1410 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1411 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1412
1413 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1414 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1415
1416 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
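// When the two inputs have different ranks, AddBroadcastReshapeLayer inserts a Reshape on
// the lower-rank input so the shapes can broadcast (for example a {3} tensor subtracted
// from a {1, 224, 224, 3} tensor); that helper is expected to register the input slots
// itself, which is why RegisterInputSlots is only called in the else branch.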
1417 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1418 {
1419 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1420 }
1421 else
1422 {
1423 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1424 }
1425
1426 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1427
1428 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1429 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1430}
1431
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001432void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1433{
1434 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1435
1436 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1437 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1438
1439 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1440 CHECK_VALID_SIZE(inputs.size(), 2);
1441
1442 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1443 CHECK_VALID_SIZE(outputs.size(), 1);
1444
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001445 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1446 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1447
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001448 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1449 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1450
1451 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1452 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1453
1454 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001455 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1456 {
1457 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1458 }
1459 else
1460 {
1461 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1462 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001463
1464 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1465
1466 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1467 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1468}
1469
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001470void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1471{
1472 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1473
1474 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1475 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1476
1477 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1478 CHECK_VALID_SIZE(inputs.size(), 2);
1479
1480 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1481 CHECK_VALID_SIZE(outputs.size(), 1);
1482
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001483 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1484 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1485
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001486 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1487 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1488
1489 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1490 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1491
1492 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001493 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1494 {
1495 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1496 }
1497 else
1498 {
1499 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1500 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001501
1502 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1503
1504 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1505 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1506}
1507
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001508void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1509{
1510 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1511
1512 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1513
1514 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1515 CHECK_VALID_SIZE(outputs.size(), 1);
1516
1517 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1518 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1519
1520 armnn::MeanDescriptor desc;
1521 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1522 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1523 desc.m_Axis = axis;
1524
1525 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1526 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1527
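// keep_dims is not read from the operator options here; it is inferred from the ranks:
// if the output rank matches the input rank the reduced axes were kept with size 1,
// otherwise they were dropped.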
1528 desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1531
1532 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1533 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1534
1535 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1536
1537 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1538 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1539
1540 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1541 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1542}
1543
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001544void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1545{
1546 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1547
1548 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1549
1550 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1551 CHECK_VALID_SIZE(outputs.size(), 1);
1552
1553 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1554 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1555
1556 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1557 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1558
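// The TfLite paddings tensor has shape [rank, 2], one (before, after) pair per dimension.
// Flattened, the pairs are read with a stride of 2; for example {0,0, 1,1, 1,1, 0,0}
// pads H and W of an NHWC tensor by one element on each side.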
1559 size_t step = 2;
1560 armnn::PadDescriptor desc;
1561 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1562 {
1563 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1564 }
1565
1566 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1567 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1568
1569 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1570 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1571
1572 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1573 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1574
1575 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1576 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1577}
1578
Finn Williamsc42c3842019-01-22 14:18:11 +00001579
Sadik Armagan58f39192018-09-17 14:14:39 +01001580void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1581{
Finn Williamsc42c3842019-01-22 14:18:11 +00001582 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001583}
1584
1585void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1586{
Finn Williamsc42c3842019-01-22 14:18:11 +00001587 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1588}
Sadik Armagan58f39192018-09-17 14:14:39 +01001589
Finn Williamsc42c3842019-01-22 14:18:11 +00001590void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1591{
1592 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1593}
1594
Nina Drozd99851762019-04-09 09:37:38 +01001595void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1596{
1597 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1598}
1599
Finn Williamsc42c3842019-01-22 14:18:11 +00001600
1601void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1602{
1603 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001604 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1605 boost::ignore_unused(operatorPtr);
1606
1607 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1608 CHECK_VALID_SIZE(inputs.size(), 1);
1609
1610 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1611 CHECK_VALID_SIZE(outputs.size(), 1);
1612
Finn Williamsc42c3842019-01-22 14:18:11 +00001613 std::string layerName = "Activation:";
Sadik Armagan58f39192018-09-17 14:14:39 +01001614 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001615 activationDesc.m_Function = activationType;
1616
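// Descriptor parameters follow Arm NN's activation conventions: BoundedReLu clamps to
// [m_B, m_A] (so RELU6 sets m_A = 6.0f), while TanH computes m_A * tanh(m_B * x)
// (both 1.0f here).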
1617 switch (activationType)
1618 {
1619 case ActivationFunction::ReLu:
1620 {
1621 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1622 break;
1623 }
1624 case ActivationFunction::BoundedReLu:
1625 {
1626 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1627 activationDesc.m_A = 6.0f;
1628 activationDesc.m_B = 0.0f;
1629 break;
1630 }
1631 case ActivationFunction::Sigmoid:
1632 {
1633 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1634 break;
1635 }
Nina Drozd99851762019-04-09 09:37:38 +01001636 case ActivationFunction::TanH:
1637 {
1638 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1639 activationDesc.m_A = 1.0f;
1640 activationDesc.m_B = 1.0f;
1641 break;
1642 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001643 default:
1644 {
1645 throw ParseException(
1646 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1647 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1648 }
1649 }
1650
1651 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001652
1653 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1654 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1655
1656 // register the input connection slots for the layer, connections are made after all layers have been created
1657 // only the tensors for the inputs are relevant, exclude the const tensors
1658 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1659 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1660
1661 // register the output connection slots for the layer, connections are made after all layers have been created
1662 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1663 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1664}
Sadikb94967b2018-09-19 15:30:00 +01001665armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1666 const std::vector<int32_t> & targetDimsIn)
1667{
1668 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1669 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1670
1671 if (stretchDim != targetDimsIn.end())
1672 {
1673 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1674 {
1675 throw ParseException(
1676 boost::str(
1677 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1678 }
1679
1680 auto targetNumElements =
1681 boost::numeric_cast<unsigned int>(
1682 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1683
1684 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1685 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1686 }
1687
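// Illustrative example (not from a real model): an input of 6 elements reshaped with
// target dims {-1, 3} gives targetNumElements = 3, so the stretch dimension becomes
// 6 / 3 = 2 and the output shape is {2, 3}.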
1688 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1689
1690 TensorInfo reshapeInfo = inputTensorInfo;
1691 reshapeInfo.SetShape(outputShape);
1692
1693 return reshapeInfo;
1694}
1695
1696void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1697{
1698 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1699
1700 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001701
1702 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1703 CHECK_VALID_SIZE(outputs.size(), 1);
1704
1705 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1706 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1707
1708 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001709 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1710 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001711 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1712
kevmay0171972a82018-12-17 14:28:03 +00001713 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001714 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1715 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001716 {
1717 std::stringstream ss;
1718 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001719 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001720 << " does not equal output shape "
1721 << actualOutputTensorInfo.GetShape()
1722 << ": "
1723 << CHECK_LOCATION().AsString();
1724 throw ParseException(ss.str());
1725 }
1726
Sadikb94967b2018-09-19 15:30:00 +01001727 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001728 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001729
1730 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1731 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001732 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001733
1734 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1735 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1736
1737 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1738 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1739}
1740
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001741void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1742{
1743 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1744
1745 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1746 CHECK_VALID_SIZE(inputs.size(), 2);
1747
1748 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1749 CHECK_VALID_SIZE(outputs.size(), 1);
1750
1751 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1752
1753 // Data for the parsed tensor args (size) must be stored locally.
1754 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1755
1756 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1757 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1758
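// The size input holds {new_height, new_width}; it must be a constant tensor because its
// values are baked into the ResizeDescriptor at parse time rather than wired up as a
// layer input.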
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001759 ResizeDescriptor desc;
1760 desc.m_Method = armnn::ResizeMethod::Bilinear;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001761 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001762 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1763 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001764
1765 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001766 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001767
1768 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1769 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1770
1771 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1772 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1773
1774 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1775 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1776}
1777
Sadik Armagan479045b2018-10-01 11:51:37 +01001778void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1779{
1780 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1781
1782 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1783 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1784
1785 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1786
1787 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1788 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1789 CHECK_VALID_SIZE(outputs.size(), 1);
1790
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001791 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1792 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001793
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001794 const unsigned int concatDimInput = static_cast<unsigned int>(
1795 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
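// Normalise a possibly negative TfLite axis into [0, inputRank); for example axis -1 on a
// rank-4 input maps to concatDimInput 3.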
Sadik Armagan479045b2018-10-01 11:51:37 +01001796
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001797 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1798 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001799
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001800 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001801
1802 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1803 {
1804 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1805
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001806 // This sets up the concatDescriptor view origin
1807 armnnUtils::ProcessConcatInputTensorInfo(
1808 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001809 }
1810
1811 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01001812 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01001813
1814 BOOST_ASSERT(layer != nullptr);
1815
1816 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1817 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001818
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001819 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001820
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001821 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001822
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001823 // add fused activation layer
1824 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001825
Sadik Armagan479045b2018-10-01 11:51:37 +01001826 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1827 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1828}
1829
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001830void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1831{
1832 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1833
1834 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1835 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1836
1837 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1838
1839 FullyConnectedDescriptor desc;
1840 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001841 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001842
1843 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1844 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1845 CHECK_VALID_SIZE(outputs.size(), 1);
1846
1847 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1848
1849 // The Fully Connected layer accepts two-dimensional weights input
1850 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1851 if (weightsDimension != 2)
1852 {
1853 throw ParseException(
1854 boost::str(
1855 boost::format(
1856 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1857 "Node %2%")
1858 % weightsDimension
1859 % CHECK_LOCATION().AsString()));
1860 }
1861
Matteo Martincigh747ef822018-12-18 09:26:39 +00001862 auto filterTensorAndData = CreateConstTensor(inputs[1],
1863 filterTensorInfo,
1864 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001865 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001866 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1867
1868 if (inputs.size() == 3)
1869 {
1870 desc.m_BiasEnabled = true;
1871 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001872 auto biasTensorAndData = CreateConstTensor(inputs[2],
1873 biasTensorInfo,
1874 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001875 layer = m_Network->AddFullyConnectedLayer(desc,
1876 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001877 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001878 layerName.c_str());
1879 }
1880 else
1881 {
1882 layer = m_Network->AddFullyConnectedLayer(desc,
1883 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001884 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001885 layerName.c_str());
1886 }
1887 BOOST_ASSERT(layer != nullptr);
1888
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001889 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1890
1891 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1892
1893 if (inputTensorInfo.GetNumDimensions() > 2)
1894 {
1895 // Add reshape to flatten to 2D [batch_size, input_size],
1896 // where "input_size" corresponds to the number of inputs to the layer,
1897 // matching the second dimension of weights,
1898 // and "batch_size" is calculated by dividing the number of elements by "input_size".
1899 std::vector<unsigned int> reshapedDimensions(2);
1900 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
1901 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
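// Illustrative example (not from a real model): an input of shape {1, 2, 2, 8}
// (32 elements) with weights of shape {16, 32} is flattened to {1, 32} before being fed
// into the FullyConnected layer.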
1902
1903 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
1904 {
1905 throw ParseException(
1906 boost::str(
1907 boost::format(
1908 "Failed to deduce input tensor shape from filter size %1%")
1909 % reshapedDimensions[1]
1910 % CHECK_LOCATION().AsString()));
1911 }
1912
1913 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
1914 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
1915
1916 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
1917 armnn::ReshapeDescriptor reshapeDescriptor;
1918 reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
1919 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, reshapeLayerName.c_str());
1920
1921 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
1922 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
1923
1924 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
1925 }
1926 else
1927 {
1928 // register the input connection slot for the layer
1929 // only the tensors for the inputs are relevant, exclude the const tensors
1930 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1931 }
1932
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001933 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1934 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1935
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001936 // we need to add the activation layer and fortunately we don't need to care about the data layout
1937 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1938 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001939
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001940 // register the output connection slots for the layer, connections are made after all layers have been created
1941 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1942 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1943}
1944
keidav011b3e2ea2019-02-21 10:07:37 +00001945void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1946{
1947 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1948
1949 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1950
1951 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1952 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1953 CHECK_VALID_SIZE(outputs.size(), 4);
1954
1955 // Obtain custom options from flexbuffers
1956 auto custom_options = operatorPtr->custom_options;
1957 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1958
1959 // Obtain descriptor information from tf lite
1960 DetectionPostProcessDescriptor desc;
1961 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1962 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1963 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1964 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1965 desc.m_NumClasses = m["num_classes"].AsUInt32();
1966 desc.m_ScaleH = m["h_scale"].AsFloat();
1967 desc.m_ScaleW = m["w_scale"].AsFloat();
1968 desc.m_ScaleX = m["x_scale"].AsFloat();
1969 desc.m_ScaleY = m["y_scale"].AsFloat();
1970
keidav0107d58c72019-02-26 11:57:39 +00001971 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00001972 {
keidav0107d58c72019-02-26 11:57:39 +00001973 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00001974 }
1975 if (!(m["detections_per_class"].IsNull()))
1976 {
1977 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1978 }
1979
1980 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1981 {
1982 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1983 "must be positive and less than or equal to 1.");
1984 }
1985
1986 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
1987 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
1988 armnn::Optional<armnn::PermutationVector&>());
1989
1990 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
1991 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
1992 layerName.c_str());
1993
1994 BOOST_ASSERT(layer != nullptr);
1995
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00001996 // The model does not specify the output shapes.
1997 // The output shapes are calculated from the max_detection and max_classes_per_detection.
1998 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
1999 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2000 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2001 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2002 m_OverridenOutputShapes.push_back({ 1 });
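// The four outputs follow the TfLite DetectionPostProcess convention: detection boxes
// {1, numDetectedBox, 4}, detection classes {1, numDetectedBox}, detection scores
// {1, numDetectedBox} and the number of valid detections {1}.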
2003
keidav011b3e2ea2019-02-21 10:07:37 +00002004 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
2005 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002006 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00002007 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
2008 }
2009
2010 // Register the input connection slots for the layer, connections are made after all layers have been created
2011 // only the tensors for the inputs are relevant, exclude the const tensors
2012 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2013 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2014
2015 // Register the output connection slots for the layer, connections are made after all layers have been created
2016 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2017 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2018 outputTensorIndexes[1],
2019 outputTensorIndexes[2],
2020 outputTensorIndexes[3]});
2021}
2022
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002023/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2024void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2025{
2026 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2027
2028 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2029 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2030 CHECK_VALID_SIZE(outputs.size(), 1);
2031
2032 if (inputs.size() < 1)
2033 {
2034 throw ParseException("Pack must have at least one input.");
2035 }
2036
2037 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2038 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2039
2040 StackDescriptor desc;
2041 desc.m_Axis = static_cast<uint32_t>(options->axis);
2042 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2043
2044 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2045 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2046 desc.m_InputShape = inputTensorInfo.GetShape();
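// Stack inserts a new axis of size m_NumInputs at position m_Axis; for example packing
// three {4, 5} tensors along axis 0 yields an output of shape {3, 4, 5}.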
2047
2048 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2049 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2050
2051 BOOST_ASSERT(layer != nullptr);
2052
2053 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2054 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2055
2056 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2057 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2058
2059 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2060 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2061}
2062
Nina Drozd200e3802019-04-15 09:47:39 +01002063void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2064{
2065 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2066
2067 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2068 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2069
2070 // This unpackAxis indicates the axis to unpack
2071 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2072
2073 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2074 CHECK_VALID_SIZE(inputs.size(), 1);
2075
2076 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002077
2078 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2079 {
2080 throw ParseException(
2081 boost::str(
2082 boost::format(
2083 "The unpack axis: %1% cannot be greater than or equal to "
2084 "the number of input dimension %2% %3%")
2085 % unpackAxis
2086 % inputTensorInfo.GetNumDimensions()
2087 % CHECK_LOCATION().AsString()));
2088 }
2089
Nina Drozd200e3802019-04-15 09:47:39 +01002090 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2091 // If num is not defined, automatically infer from the length of the dimension axis.
2092 if(unpackNum == 0)
2093 {
2094 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2095 }
2096
2097 // If unpack number cannot be inferred and is still zero, throw ParseException.
2098 if(unpackNum == 0)
2099 {
2100 throw ParseException("Number to unpack must greater than zero.");
2101 }
2102
2103 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2104 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2105
2106 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2107 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2108
2109 // Add current input shape to unpackDimSizes
2110 for (unsigned int i = 0; i < inputDimSize; ++i)
2111 {
2112 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2113 }
2114
2115 if (unpackDimSizes[unpackAxis] != unpackNum)
2116 {
2117 throw ParseException("Number to unpack must be the same as length of the dimension to "
2118 "unpack along.");
2119 }
2120
2121 unpackDimSizes[unpackAxis] /= unpackNum;
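// Illustrative example: unpacking a {4, 3, 2} tensor along axis 1 produces three splitter
// views of shape {4, 1, 2}, each of which is reshaped to {4, 2} further down.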
2122
2123 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2124 for (unsigned int j = 0; j < unpackNum; ++j)
2125 {
2126 // Set the size of the views.
2127 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2128 {
2129 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2130 }
2131 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2132 }
2133
2134 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2135 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2136
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002137 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2138 unpackDimSizes.data());
2139
Nina Drozd200e3802019-04-15 09:47:39 +01002140 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2141 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2142
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002143 // Reshape to remove unpacked dimension
2144 unsigned int reshapedNumDimensions = inputDimSize - 1;
2145 std::vector<unsigned int> reshapedDimensions(reshapedNumDimensions);
Nina Drozd200e3802019-04-15 09:47:39 +01002146
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002147 unsigned int reshapeIndex = 0;
2148 for (unsigned int i = 0; i < inputDimSize; ++i)
Nina Drozd200e3802019-04-15 09:47:39 +01002149 {
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002150 if (i == unpackAxis)
2151 {
2152 continue;
2153 }
2154 reshapedDimensions[reshapeIndex++] = unpackDimSizes[i];
Nina Drozd200e3802019-04-15 09:47:39 +01002155 }
2156
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002157 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2158 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2159 {
2160 armnn::TensorInfo reshapedTensorInfo = inputTensorInfo;
2161 reshapedTensorInfo.SetShape(armnn::TensorShape{ reshapedNumDimensions, reshapedDimensions.data() });
2162
2163 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2164 armnn::ReshapeDescriptor desc;
2165 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2166 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
2167
2168 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape, inputTensorInfo.GetDataType()));
2169 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2170
2171 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2172
2173 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2174 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2175 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2176 }
Nina Drozd200e3802019-04-15 09:47:39 +01002177}
2178
Nina Drozd0324f482019-04-08 10:52:10 +01002179void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2180{
2181 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2182
2183 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2184 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2185
2186 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2187
Nina Drozd200e3802019-04-15 09:47:39 +01002188 // If number of splits cannot be inferred and is zero, throw ParseException.
2189 if(numSplits == 0)
2190 {
2191 throw ParseException("Number to splits must greater than zero.");
2192 }
2193
Nina Drozd0324f482019-04-08 10:52:10 +01002194 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2195 CHECK_VALID_SIZE(inputs.size(), 2);
2196 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2197 CHECK_VALID_SIZE(outputs.size(), numSplits);
2198
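// For SPLIT the operator inputs are ordered (axis, input): input 0 is a constant scalar
// axis tensor and input 1 is the tensor to split, which is why the slot registration
// below uses inputTensorIndexes[1].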
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002199 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2200 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002201
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002202 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2203 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2204 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2205
2206 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2207 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002208
2209 // Armnn supports split only along the channel dimension, i.e. dimension 1 for NCHW or 3 for NHWC.
2210 if (splitDim == 0 || splitDim == 2)
2211 {
2212 throw ParseException(
2213 boost::str(
2214 boost::format(
2215 "Dimension %1% for split is not supported by Armnn. %2%")
2216 % splitDim
2217 % CHECK_LOCATION().AsString()));
2218 }
2219
2220 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002221 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002222 {
2223 throw ParseException(
2224 boost::str(
2225 boost::format(
2226 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002227 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002228 % inputTensorInfo.GetNumDimensions()
2229 % MaxNumOfTensorDimensions
2230 % CHECK_LOCATION().AsString()));
2231 }
2232
2233 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2234
2235 // Add current input shape to splitterDimSizes
2236 for (unsigned int i = 0; i < inputDimSize; ++i)
2237 {
2238 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2239 }
2240
2241 if (splitterDimSizes[splitDim] % numSplits != 0)
2242 {
2243 throw ParseException("Number of splits must evenly divide the dimension");
2244 }
2245 splitterDimSizes[splitDim] /= numSplits;
2246
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002247 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002248 for (unsigned int j = 0; j < numSplits; ++j)
2249 {
2250 // Set the size of the views.
2251 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2252 {
2253 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2254 }
2255 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2256 }
2257
2258 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2259 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2260
2261 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002262 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002263
2264 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2265 splitterDimSizes.data());
2266
2267 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2268 {
2269 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
2270 inputTensorInfo.GetDataType()));
2271 }
2272
2273 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2274 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2275}
2276
Sadik Armagan58f39192018-09-17 14:14:39 +01002277armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2278 unsigned int outputSlot,
2279 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002280{
2281 ActivationDescriptor activationDesc;
2282 std::string layerName = prevLayer->GetName();
2283
2284 switch(activationType)
2285 {
2286 case tflite::ActivationFunctionType_NONE:
2287 {
2288 // this is a no-op: return previous layer
2289 return prevLayer;
2290 }
2291 case tflite::ActivationFunctionType_RELU:
2292 {
2293 activationDesc.m_Function = ActivationFunction::ReLu;
2294 layerName += ":RELU";
2295 break;
2296 }
2297 case tflite::ActivationFunctionType_RELU6:
2298 {
2299 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2300 activationDesc.m_A = 6.0f;
2301 activationDesc.m_B = 0.0f;
2302 layerName += ":RELU6";
2303 break;
2304 }
2305 case tflite::ActivationFunctionType_TANH:
2306 {
2307 activationDesc.m_Function = ActivationFunction::TanH;
2308 activationDesc.m_A = 1.0f;
2309 activationDesc.m_B = 1.0f;
2310 layerName += ":TANH";
2311 break;
2312 }
2313
2314 // These are listed here only as a reminder of the other activation functions we could support
2315 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2316 case tflite::ActivationFunctionType_SIGN_BIT:
2317 default:
2318 {
2319 throw ParseException(
2320 boost::str(
2321 boost::format("TfLite parser doesn't suppport fused activation: "
2322 "%1%/%2% %3% ") %
2323 activationType %
2324 tflite::EnumNameActivationFunctionType(activationType) %
2325 CHECK_LOCATION().AsString()));
2326
2327 }
2328 }
2329
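// The activation is spliced onto the requested output slot of the previous layer; callers
// register the returned activation layer against the TfLite output tensor, so downstream
// connections consume the activated value.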
2330 IConnectableLayer* activationLayer =
2331 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2332
2333 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2334 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2335 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2336 return activationLayer;
2337}
2338
2339TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2340{
2341 if (fileName == nullptr)
2342 {
2343 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2344 CHECK_LOCATION().AsString()));
2345 }
2346 boost::system::error_code errorCode;
2347 boost::filesystem::path pathToFile(fileName);
2348 if (!boost::filesystem::exists(pathToFile, errorCode))
2349 {
2350 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2351 fileName %
2352 errorCode %
2353 CHECK_LOCATION().AsString()));
2354 }
2355 std::ifstream file(fileName, std::ios::binary);
2356 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2357 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2358 fileContent.size());
2359}
2360
2361TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2362{
2363 if (binaryContent == nullptr)
2364 {
2365 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2366 CHECK_LOCATION().AsString()));
2367 }
2368 flatbuffers::Verifier verifier(binaryContent, len);
2369 if (verifier.VerifyBuffer<tflite::Model>() == false)
2370 {
2371 throw ParseException(
2372 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2373 "flatbuffers format. size:%1% %2%") %
2374 len %
2375 CHECK_LOCATION().AsString()));
2376 }
2377 return tflite::UnPackModel(binaryContent);
2378}
2379
2380TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2381 size_t subgraphIndex,
2382 size_t operatorIndex)
2383{
2384 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2385
Derek Lambertiff05cc52019-04-26 13:05:17 +01002386 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2387 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002388
2389 size_t inputCount = operatorPtr->inputs.size();
2390 TensorRawPtrVector result(inputCount);
2391 for (size_t i=0; i<inputCount; ++i)
2392 {
2393 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002394 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002395 }
2396 return result;
2397}
2398
2399TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2400 size_t subgraphIndex,
2401 size_t operatorIndex)
2402{
2403 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2404
Derek Lambertiff05cc52019-04-26 13:05:17 +01002405 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2406 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002407
2408 size_t outputCount = operatorPtr->outputs.size();
2409 TensorRawPtrVector result(outputCount);
2410 for (size_t i=0; i<outputCount; ++i)
2411 {
2412 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2413 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002414 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002415 }
2416 return result;
2417}
2418
2419TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2420 size_t subgraphIndex)
2421{
2422 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002423 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002424
Derek Lambertiff05cc52019-04-26 13:05:17 +01002425 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002426 TensorIdRawPtrVector result(inputCount);
2427 for (size_t i=0; i<inputCount; ++i)
2428 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002429 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002430 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002431 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002432 }
2433 return result;
2434}
2435
2436TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2437 size_t subgraphIndex)
2438{
2439 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002440 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002441
Derek Lambertiff05cc52019-04-26 13:05:17 +01002442 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002443 TensorIdRawPtrVector result(outputCount);
2444 for (size_t i=0; i<outputCount; ++i)
2445 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002446 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2447 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002448 }
2449 return result;
2450}
2451
2452std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2453 size_t subgraphIndex,
2454 size_t operatorIndex)
2455{
2456 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002457 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2458 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002459 return operatorPtr->inputs;
2460}
2461
2462std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2463 size_t subgraphIndex,
2464 size_t operatorIndex)
2465{
2466 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002467 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2468 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002469 return operatorPtr->outputs;
2470}
2471
2472void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2473 size_t operatorIndex,
2474 IConnectableLayer* layer,
2475 const std::vector<unsigned int>& tensorIndexes)
2476{
2477 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2478 BOOST_ASSERT(layer != nullptr);
2479 if (tensorIndexes.size() != layer->GetNumInputSlots())
2480 {
2481 throw ParseException(
2482 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2483 " for subgraph:%3% operator index:%4% %5%") %
2484 tensorIndexes.size() %
2485 layer->GetNumInputSlots() %
2486 subgraphIndex %
2487 operatorIndex %
2488 CHECK_LOCATION().AsString()));
2489 }
2490
2491 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2492 {
2493 unsigned int tensorIndex = tensorIndexes[slotIndex];
2494 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2495 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2496 }
2497}
2498
void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
                                       size_t operatorIndex,
                                       IConnectableLayer* layer,
                                       const std::vector<unsigned int>& tensorIndexes)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);
    if (tensorIndexes.size() != layer->GetNumOutputSlots())
    {
        throw ParseException(
            boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
                                     " for subgraph:%3% operator index:%4% %5%") %
                       tensorIndexes.size() %
                       layer->GetNumOutputSlots() %
                       subgraphIndex %
                       operatorIndex %
                       CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
    {
        unsigned int tensorIndex = tensorIndexes[slotIndex];
        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
        RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}

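// Adds an armnn input layer (with a generated binding id) for every graph-level input of the subgraph.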
void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
    for (auto const & tensorIdAndPtr : inputs)
    {
        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
        IConnectableLayer* layer =
            m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());

        auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
        layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            layer,
                            { static_cast<uint32_t>(tensorIdAndPtr.first) });
    }
}

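// Adds an armnn output layer (with a generated binding id) for every graph-level output of the subgraph.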
void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
    for (auto const & tensorIdAndPtr : outputs)
    {
        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
        IConnectableLayer* layer =
            m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());

        RegisterInputSlots(subgraphIndex,
                           VIRTUAL_OPERATOR_ID,
                           layer,
                           { static_cast<uint32_t>(tensorIdAndPtr.first) });
    }
}

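// Creates constant layers for tensors that are consumed by some operator but have no registered
// producer, i.e. constant data (such as weights) stored directly in the model's buffers.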
void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    // Walk the connection table for this subgraph only, so that tensorIndex always refers into
    // subgraphPtr->tensors. A tensor with consumers but no producer is constant data.
    for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
    {
        if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
            m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
        {
            TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
            armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
            auto tensorAndData = CreateConstTensor(tensorPtr,
                                                   tensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());

            std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
            IConnectableLayer *layer =
                m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

            layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
            RegisterOutputSlots(subgraphIndex,
                                VIRTUAL_OPERATOR_ID,
                                layer,
                                { tensorIndex });
        }
    }
}

// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
{
    CHECK_BUFFER(model, bufferIndex);
    return model->buffers[bufferIndex].get();
}

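// Copies the buffer contents into typed storage via CreateConstTensorImpl<T> and returns the
// resulting ConstTensor together with the storage object that keeps its data alive.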
template<typename T>
std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
                                            TfLiteParser::TensorRawPtr tensorPtr,
                                            armnn::TensorInfo& tensorInfo,
                                            armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    auto constData = CreateConstTensorImpl<T>(bufferPtr,
                                              tensorPtr,
                                              tensorInfo,
                                              permutationVector);
    TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
    return std::make_pair(constData.first, std::move(storage));
}

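// Dispatches on the tensor's data type; only Float32, QuantisedAsymm8 and Signed32 constants are supported.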
std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
                                armnn::TensorInfo& tensorInfo,
                                armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    CHECK_TENSOR_PTR(tensorPtr);
    auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
    CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);

    switch (tensorInfo.GetDataType())
    {
        case armnn::DataType::Float32:
            return CreateConstTensorAndStoreData<float>(bufferPtr,
                                                        tensorPtr,
                                                        tensorInfo,
                                                        permutationVector);
        case armnn::DataType::QuantisedAsymm8:
            return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        case armnn::DataType::Signed32:
            return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        default:
        {
            std::stringstream errString;
            errString << "Unexpected datatype when creating const tensor: "
                      << armnn::GetDataTypeName(tensorInfo.GetDataType())
                      << " shape:" << tensorInfo.GetShape()
                      << CHECK_LOCATION().AsString();
            throw ParseException(errString.str());
        }
    }
}

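// Resolves a named subgraph input to its (binding id, TensorInfo) pair.
// Example usage (sketch; the tensor name "input" is a placeholder, not taken from this file):
//     BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, "input");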
BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                          const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    for (auto const & input : inputs)
    {
        if (input.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
            return std::make_pair(bindingId, ToTensorInfo(input.second));
        }
    }

    std::stringstream bindings;
    for (auto const & input : inputs)
    {
        bindings << "'" << input.second->name << "' ";
    }

    throw ParseException(
        boost::str(
            boost::format("No input binding found for subgraph:%1% and name:%2%. "
                          "Possible inputs are: [%3%] %4%") %
            subgraphId %
            name %
            bindings.str() %
            CHECK_LOCATION().AsString()));
}

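// Resolves a named subgraph output to its (binding id, TensorInfo) pair, preferring any output
// shape override recorded by the parser over the shape stored in the model.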
BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        auto const output = outputs[i];
        if (output.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
            std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
                                              m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
            return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
        }
    }

    std::stringstream bindings;
    for (auto const & output : outputs)
    {
        bindings << "'" << output.second->name << "' ";
    }

    throw ParseException(
        boost::str(
            boost::format("No output binding found for subgraph:%1% and name:%2%. "
                          "Possible outputs are: [%3%] %4%") %
            subgraphId %
            name %
            bindings.str() %
            CHECK_LOCATION().AsString()));
}

size_t TfLiteParser::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}

std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(inputs.size());
    for (auto const & input : inputs)
    {
        result.push_back(input.second->name);
    }
    return result;
}

std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(outputs.size());
    for (auto const & output : outputs)
    {
        result.push_back(output.second->name);
    }
    return result;
}

ITfLiteParser* ITfLiteParser::CreateRaw()
{
    return new TfLiteParser();
}

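// Example usage (illustrative sketch; assumes the CreateNetworkFromBinaryFile entry point declared
// on ITfLiteParser, and a placeholder model file name):
//     armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");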
ITfLiteParserPtr ITfLiteParser::Create()
{
    return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
}

void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}

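// SupportedDataStorage keeps exactly one of the typed buffers alive: each constructor below takes
// ownership of float, uint8_t or int32_t data respectively and leaves the other members null.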
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int32Data(nullptr)
{
}

TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int32Data(nullptr)
{
}

TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int32Data(std::move(data))
{
}

} // armnnTfLiteParser