blob: d1cef31446f4d0d34be49a1588bd6af1e4e49da6 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5#include "TfLiteParser.hpp"
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Exceptions.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <boost/filesystem.hpp>
11
12// armnnUtils:
Sadik Armagan479045b2018-10-01 11:51:37 +010013#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010014#include <Permute.hpp>
15#include <VerificationHelpers.hpp>
16
17// The generated code based on the Tf Lite schema:
18#include <schema_generated.h>
19
20#include <boost/core/ignore_unused.hpp>
21#include <boost/assert.hpp>
22#include <boost/format.hpp>
23#include <boost/log/trivial.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010024#include <boost/format.hpp>
25#include <boost/numeric/conversion/cast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026
27#include <fstream>
28#include <algorithm>
29#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010030#include <numeric>
keidav011b3e2ea2019-02-21 10:07:37 +000031#include <flatbuffers/flexbuffers.h>
telsoa01c577f2c2018-08-31 09:22:23 +010032
33using namespace armnn;
34using armnn::CheckLocation;
35namespace armnnTfLiteParser
36{
37namespace
38{
jimfly01c25411c2018-11-14 17:47:22 +000039
telsoa01c577f2c2018-08-31 09:22:23 +010040const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
41
42void CheckSubgraph(const TfLiteParser::ModelPtr & model,
43 size_t subgraphIndex,
44 const CheckLocation & location)
45{
46 if (model.get() == nullptr)
47 {
48 throw ParseException(
49 boost::str(
50 boost::format("%1% was called with invalid (null) model. "
51 "Possible reason is that the model is not yet loaded and Unpack(ed). "
52 "subgraph:%2% at %3%") %
53 location.m_Function %
54 subgraphIndex %
55 location.FileLine()));
56 }
57 else if (subgraphIndex >= model->subgraphs.size())
58 {
59 throw ParseException(
60 boost::str(
61 boost::format("%1% was called with an invalid subgraph index. "
62 "subgraph:%2% at %3%") %
63 location.m_Function %
64 subgraphIndex %
65 location.FileLine()));
66 }
67}
68
69#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
70 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
71
72void CheckModel(const TfLiteParser::ModelPtr & model,
73 size_t subgraphIndex,
74 size_t operatorIndex,
75 const CheckLocation & location)
76{
77 if (model.get() == nullptr)
78 {
79 throw ParseException(
80 boost::str(
81 boost::format("%1% was called with invalid (null) model. "
82 "Possible reason is that the model is not yet loaded and Unpack(ed). "
83 "subgraph:%2% operator:%3% at %4%") %
84 location.m_Function %
85 subgraphIndex %
86 operatorIndex %
87 location.FileLine()));
88 }
89 else if (subgraphIndex >= model->subgraphs.size())
90 {
91 throw ParseException(
92 boost::str(
93 boost::format("%1% was called with an invalid subgraph index. "
94 "subgraph:%2% operator:%3% at %4%") %
95 location.m_Function %
96 subgraphIndex %
97 operatorIndex %
98 location.FileLine()));
99 }
100 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
101 operatorIndex != VIRTUAL_OPERATOR_ID)
102 {
103 throw ParseException(
104 boost::str(
105 boost::format("%1% was called with an invalid operator index. "
106 "subgraph:%2% operator:%3% at %4%") %
107 location.m_Function %
108 subgraphIndex %
109 operatorIndex %
110 location.FileLine()));
111 }
112}
113
114#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
115 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
116
117void CheckTensor(const TfLiteParser::ModelPtr & model,
118 size_t subgraphIndex,
119 size_t tensorIndex,
120 const CheckLocation & location)
121{
122 // not checking model, because I assume CHECK_MODEL already run
123 // and checked that. An assert would do.
124 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
125
126 // also subgraph index should be checked by CHECK_MODEL so
127 // I only add an assert here
128 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
129
130 // the tensor index is the only one to check here
131 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
132 {
133 throw ParseException(
134 boost::str(
135 boost::format("%1% was called with an invalid tensor index. "
136 "subgraph:%2% tensor:%3% at %4%") %
137 location.m_Function %
138 subgraphIndex %
139 tensorIndex %
140 location.FileLine()));
141 }
142}
143
144#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
145 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
146
147void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
148 const CheckLocation & location)
149{
150 if (rawPtr == nullptr)
151 {
152 throw ParseException(
153 boost::str(
154 boost::format("%1% was called with a null tensor pointer. "
155 "at %2%") %
156 location.m_Function %
157 location.FileLine()));
158
159 }
160}
161
162#define CHECK_TENSOR_PTR(TENSOR_PTR) \
163 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
164
165void CheckBuffer(const TfLiteParser::ModelPtr & model,
166 size_t bufferIndex,
167 const CheckLocation & location)
168{
169 if (model.get() == nullptr)
170 {
171 throw ParseException(
172 boost::str(
173 boost::format("%1% was called with invalid (null) model. "
174 "Possible reason is that the model is not yet loaded and Unpack(ed). "
175 "buffer:%2% at %3%") %
176 location.m_Function %
177 bufferIndex %
178 location.FileLine()));
179 }
180 else if (bufferIndex >= model->buffers.size())
181 {
182 throw ParseException(
183 boost::str(
184 boost::format("%1% was called with an invalid buffer index. "
185 "buffer index:%2% at %3%") %
186 location.m_Function %
187 bufferIndex %
188 location.FileLine()));
189 }
190 else if (model->buffers[bufferIndex].get() == nullptr)
191 {
192 throw ParseException(
193 boost::str(
194 boost::format("The buffer #%1% is null. %3%") %
195 bufferIndex %
196 location.AsString()));
197 }
198}
199
200#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
201 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
202
203void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
204 const armnn::TensorInfo & tensorInfo,
205 uint32_t bufferId,
206 const CheckLocation & location)
207{
208 if (bufferPtr == nullptr)
209 {
210 throw ParseException(
211 boost::str(
212 boost::format("BufferPtr is null for buffer:%1%. %2%") %
213 bufferId %
214 location.AsString()));
215 }
216 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
217 tensorInfo.GetNumBytes() > bufferPtr->data.size())
218 {
219 std::stringstream ss;
220 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
221 << "For tensor: " << tensorInfo.GetShape()
222 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
223 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
224 throw ParseException(ss.str());
225 }
226}
227
228#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
229 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
230
231bool IsActivationSupported(tflite::ActivationFunctionType activationType)
232{
233 switch(activationType)
234 {
235 case tflite::ActivationFunctionType_NONE:
236 case tflite::ActivationFunctionType_RELU:
237 case tflite::ActivationFunctionType_RELU6:
238 case tflite::ActivationFunctionType_TANH:
239 {
240 return true;
241 }
242 default:
243 {
244 return false;
245 }
246 }
247}
248
249#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
250 do { \
251 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
252 { \
253 throw ParseException( \
254 boost::str( \
255 boost::format("TfLite parser doesn't suppport fused activation: " \
256 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
257 OPTION->fused_activation_function % \
258 tflite::EnumNameActivationFunctionType(\
259 OPTION->fused_activation_function) % \
260 __func__ % \
261 SUBGRAPH_INDEX % \
262 OPERATOR_INDEX % \
263 CHECK_LOCATION().FileLine())); \
264 } \
265 } while(false)
266
267
268std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
269{
270 std::vector<unsigned int> result;
271 result.reserve(in.size());
272 for (auto & i : in)
273 {
274 result.push_back(CHECKED_NON_NEGATIVE(i));
275 }
276 return result;
277}
278
279void CalcPadding(uint32_t inputSize,
280 uint32_t filterSize,
281 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100282 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100283 uint32_t& paddingFront,
284 uint32_t& paddingBack,
285 tflite::Padding padding)
286{
287 paddingFront = 0;
288 paddingBack = 0;
289 if (padding == tflite::Padding_SAME)
290 {
291 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100292 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
293 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100294 if (temp > inputSize)
295 {
296 paddingFront = (temp - inputSize) / 2;
297 paddingBack = (temp - inputSize) - paddingFront;
298 }
299 }
300}
301
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000302armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
telsoa01c577f2c2018-08-31 09:22:23 +0100303{
304 armnn::DataType type;
305 CHECK_TENSOR_PTR(tensorPtr);
306
307 switch (tensorPtr->type)
308 {
309 case tflite::TensorType_UINT8:
310 type = armnn::DataType::QuantisedAsymm8;
311 break;
312 case tflite::TensorType_FLOAT32:
313 type = armnn::DataType::Float32;
314 break;
315 case tflite::TensorType_INT32:
316 type = armnn::DataType::Signed32;
317 break;
318
319 default:
320 {
321 CheckLocation location = CHECK_LOCATION();
322 throw ParseException(
323 boost::str(
324 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
325 tensorPtr->type %
326 tflite::EnumNameTensorType(tensorPtr->type) %
327 tensorPtr->name %
328 location.AsString()));
329 }
330 }
331
332 float quantizationScale = 0.0f;
333 int32_t quantizationOffset = 0;
334
335 if (tensorPtr->quantization.get())
336 {
337 CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
338 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
339
340 if (tensorPtr->quantization->scale.size() == 1)
341 {
342 quantizationScale = tensorPtr->quantization->scale[0];
343 }
344 if (tensorPtr->quantization->zero_point.size() == 1)
345 {
346 // NOTE: we lose precision here when converting from 64 bit to 32
347 // but this is what we support at the monent in ArmNN
348 quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
349 }
350 }
351
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100352 std::vector<unsigned int> safeShape = shapes;
353 if (safeShape.size() == 0)
354 {
355 safeShape.push_back(1);
356 }
357
telsoa01c577f2c2018-08-31 09:22:23 +0100358 // two statements (on purpose) for easier debugging:
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100359 armnn::TensorInfo result(static_cast<unsigned int>(safeShape.size()),
360 safeShape.data(),
telsoa01c577f2c2018-08-31 09:22:23 +0100361 type,
362 quantizationScale,
363 quantizationOffset);
364 return result;
365}
366
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000367armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
368{
369 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
370 return ToTensorInfo(tensorPtr, dimensions);
371}
372
telsoa01c577f2c2018-08-31 09:22:23 +0100373template<typename T>
374std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
375CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
376 TfLiteParser::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000377 armnn::TensorInfo& tensorInfo,
378 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100379{
380 BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
381 BOOST_ASSERT_MSG(bufferPtr != nullptr,
382 boost::str(
383 boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
384
385 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000386
387 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
388 {
389 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000390 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
391 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000392 }
393 else
394 {
395 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
396 }
397
telsoa01c577f2c2018-08-31 09:22:23 +0100398 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
399}
400
telsoa01c577f2c2018-08-31 09:22:23 +0100401armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
402{
403 // generate the binding id by shifting the tensor id by 8 bit
404 // and add the subgraph id, which allows 256 subgraphs
405 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
406}
407
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000408bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
409{
410 const unsigned int actualSize = actual.GetNumDimensions();
411 if (actualSize != expected.size())
412 {
413 return false;
414 }
415
416 for (unsigned int i = 0u; i < actualSize; i++)
417 {
418 if (expected[i] < 0 ||
419 actual[i] != static_cast<unsigned int>(expected[i]))
420 {
421 return false;
422 }
423 }
424
425 return true;
426}
427
telsoa01c577f2c2018-08-31 09:22:23 +0100428} // <anonymous>
429
430TfLiteParser::TfLiteParser()
431: m_Network(nullptr, nullptr)
432, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
433{
434 // register supported operators
435 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
Bruno Goncalvesdb947e22019-02-08 18:52:21 -0200436 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
Sadik Armagan479045b2018-10-01 11:51:37 +0100437 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
telsoa01c577f2c2018-08-31 09:22:23 +0100438 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
439 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
keidav011b3e2ea2019-02-21 10:07:37 +0000440 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
Sadik Armagan8853c1f2018-10-22 09:04:18 +0100441 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
Finn Williamsc42c3842019-01-22 14:18:11 +0000442 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
Matthew Jackson28c94572019-07-18 10:47:03 +0100443 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100444 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -0200445 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -0200446 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
Sadik Armagan58f39192018-09-17 14:14:39 +0100447 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
448 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
Sadikb94967b2018-09-19 15:30:00 +0100449 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -0200450 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
Sadik Armagan479045b2018-10-01 11:51:37 +0100451 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
Bruno Goncalvesbaded142019-02-08 19:02:48 -0200452 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
Sadik Armagan479045b2018-10-01 11:51:37 +0100453 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
Bruno Goncalves451d95b2019-02-12 22:59:22 -0200454 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
Bruno Goncalvesbbeae262019-02-07 18:37:39 -0200455 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -0200456 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
Bruno Goncalvesf803f782018-12-18 13:40:30 -0200457 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
Bruno Goncalves2235cee2018-12-19 12:51:45 -0200458 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
Matthew Jacksonbcca1f42019-07-16 11:39:21 +0100459 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
Bruno Goncalves6c2355b2018-12-19 12:52:01 -0200460 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
Nina Drozd0324f482019-04-08 10:52:10 +0100461 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
Nina Drozd99851762019-04-09 09:37:38 +0100462 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100463 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
Nina Drozd200e3802019-04-15 09:47:39 +0100464 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
telsoa01c577f2c2018-08-31 09:22:23 +0100465}
466
467void TfLiteParser::ResetParser()
468{
469 m_Network = armnn::INetworkPtr(nullptr, nullptr);
470 m_Model = nullptr;
471 m_SubgraphConnections.clear();
472}
473
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200474void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
475 size_t operatorIndex,
476 IConnectableLayer *layer)
477{
478 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
479 BOOST_ASSERT(layer != nullptr);
480
Derek Lambertiff05cc52019-04-26 13:05:17 +0100481 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
482 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200483
484 BOOST_ASSERT(operatorPtr->inputs.size() > 1);
485
486 uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
Derek Lambertiff05cc52019-04-26 13:05:17 +0100487 TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200488 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
Derek Lambertiff05cc52019-04-26 13:05:17 +0100489 TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();
Bruno Goncalves9c761a62018-12-27 14:20:35 -0200490
491 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
492 armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);
493
494 if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
495 {
496 uint32_t id = reshapedInputId;
497 reshapedInputId = inputId;
498 inputId = id;
499
500 reshapedTensorInfo = ToTensorInfo(tensorPtr1);
501 inputTensorInfo = ToTensorInfo(tensorPtr);
502 }
503
504 uint32_t numDimensions = inputTensorInfo.GetNumDimensions();
505
506 std::vector<unsigned> reshapedDim;
507 for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
508 {
509 reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
510 }
511
512 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
513 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
514
515 reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
516
517 std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
518 armnn::ReshapeDescriptor desc;
519 desc.m_TargetShape = reshapedTensorInfo.GetShape();
520 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
521
522 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
523 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
524
525 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
526
527 armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
528 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
529}
530
telsoa01c577f2c2018-08-31 09:22:23 +0100531INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
532{
533 ResetParser();
534 m_Model = LoadModelFromFile(graphFile);
535 return CreateNetworkFromModel();
536}
537
538INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
539{
540 ResetParser();
541 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
542 return CreateNetworkFromModel();
543}
544
545INetworkPtr TfLiteParser::CreateNetworkFromModel()
546{
547 m_Network = INetwork::Create();
548 BOOST_ASSERT(m_Model.get() != nullptr);
549
550 bool failedToCreate = false;
551 std::stringstream errors;
552
553 if (m_Model->subgraphs.size() != 1)
554 {
555 throw ParseException(
556 boost::str(
557 boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
558 m_Model->subgraphs.size() %
559 CHECK_LOCATION().AsString()));
560 }
561
562 size_t subgraphIndex = 0;
Derek Lambertiff05cc52019-04-26 13:05:17 +0100563 for (SubgraphPtr const & subgraph : m_Model->subgraphs)
telsoa01c577f2c2018-08-31 09:22:23 +0100564 {
565 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
566
567 size_t operatorIndex = 0;
568 for (OperatorPtr const & op : subgraph->operators)
569 {
570 try
571 {
telsoa01c577f2c2018-08-31 09:22:23 +0100572 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
573 auto builtinCode = opCodePtr->builtin_code;
574
575 if (builtinCode > tflite::BuiltinOperator_MAX)
576 {
577 throw ParseException(
578 boost::str(
579 boost::format("Operator code %1% is out of range 0-%2%. "
580 "subgraph:%3% operator idx:%4%. %5%") %
581 builtinCode %
582 tflite::BuiltinOperator_MAX %
583 subgraphIndex %
584 operatorIndex %
585 CHECK_LOCATION().AsString()));
586 }
587
588 // lookup and call the parser function
589 auto & parserFunction = m_ParserFunctions[builtinCode];
590 (this->*parserFunction)(subgraphIndex, operatorIndex);
591 }
592 catch (const ParseException& e)
593 {
594 failedToCreate = true;
595 std::stringstream errorString;
596
597 errorString << "Failed to parse operator #" << operatorIndex
598 << " within subgraph #" << subgraphIndex
599 << " error: " << e.what();
600 BOOST_LOG_TRIVIAL(error) << errorString.str();
601
602 errors << errorString.str() << "\n";
603 }
604 ++operatorIndex;
605 }
606
607 SetupInputLayers(subgraphIndex);
608 SetupOutputLayers(subgraphIndex);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -0200609 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100610
611 ++subgraphIndex;
612 }
613
614 if (failedToCreate)
615 {
616 // we can skip everything and let the outer exception handler deal with the error
617 throw ParseException(errors.str());
618 }
619
620 // establish the connections from the layer outputs to the inputs of the subsequent layers
621 for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
622 {
623 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
624 {
625 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
626 {
627 for (size_t inputSlotIdx = 0;
628 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
629 ++inputSlotIdx)
630 {
631 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
632 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
633 }
634 }
635 }
636 }
637
638 return std::move(m_Network);
639}
640
641void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
642 size_t tensorIndex,
643 armnn::IOutputSlot* slot)
644{
645 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
646 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
647 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
648
649 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
650
651 // assuming there is only one producer for that tensor
652 if (tensorSlots.outputSlot != nullptr)
653 {
654 throw ParseException(boost::str(
655 boost::format("Another layer has already registered itself as the producer of "
656 "subgraph:%1% tensor:%2% %3%") %
657 subgraphIndex %
658 tensorIndex %
659 CHECK_LOCATION().AsString()));
660 }
661
662 tensorSlots.outputSlot = slot;
663}
664
665void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
666 size_t tensorIndex,
667 armnn::IInputSlot* slot)
668{
669 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
670 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
671 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
672
673 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
674 tensorSlots.inputSlots.push_back(slot);
675}
676
677void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
678{
679 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
680 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
681 //
682 auto opcodeIndex = operatorPtr->opcode_index;
683 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
684
685 throw ParseException(
686 boost::str(
687 boost::format("Operator not supported. "
688 "subgraph:%1% operator:%2% "
689 "opcode_index:%3% opcode:%4% / %5% %6%") %
690 subgraphIndex %
691 operatorIndex %
692 opcodeIndex %
693 opcode %
694 tflite::EnumNameBuiltinOperator(opcode) %
695 CHECK_LOCATION().AsString()));
696}
697
telsoa01c577f2c2018-08-31 09:22:23 +0100698void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
699{
700 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
701
702 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
703 const auto * options = operatorPtr->builtin_options.AsConv2DOptions();
704
705 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
706
707 Convolution2dDescriptor desc;
708 desc.m_BiasEnabled = false;
709 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
710 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000711 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100712 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
713 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000714
telsoa01c577f2c2018-08-31 09:22:23 +0100715 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
716 CHECK_VALID_SIZE(inputs.size(), 2, 3);
717
718 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
719 CHECK_VALID_SIZE(outputs.size(), 1);
720
721 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
722 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
723
724 // assuming input is NHWC
725 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
726 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
727
728 // assuming the filter is OHWI : Output, H, W, Input
729 // which is essentially the same as NHWC
730 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
731 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
732
Pablo Tellof0bd6832019-04-26 17:58:13 +0100733 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
734 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
735 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
736 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100737
Matteo Martincigh747ef822018-12-18 09:26:39 +0000738 auto filterTensorAndData = CreateConstTensor(inputs[1],
739 filterTensorInfo,
740 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100741 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100742
743 auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
744
745 if (inputs.size() == 3)
746 {
747 desc.m_BiasEnabled = true;
748 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000749 auto biasTensorAndData = CreateConstTensor(inputs[2],
750 biasTensorInfo,
751 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100752 layer = m_Network->AddConvolution2dLayer(desc,
753 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100754 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100755 layerName.c_str());
756 }
757 else
758 {
759 layer = m_Network->AddConvolution2dLayer(desc,
760 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100761 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100762 layerName.c_str());
763 }
764
765 BOOST_ASSERT(layer != nullptr);
766
telsoa01c577f2c2018-08-31 09:22:23 +0100767 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000768 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100769
770 // register the input connection slots for the layer, connections are made after all layers have been created
771 // only the tensors for the inputs are relevant, exclude the const tensors
772 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000773 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100774
jimfly01c25411c2018-11-14 17:47:22 +0000775 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100776 // register the output connection slots for the layer, connections are made after all layers have been created
777 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
778 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
779}
780
781void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
782{
783 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
784
785 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
786 const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
787
788 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
789
790 DepthwiseConvolution2dDescriptor desc;
791 desc.m_BiasEnabled = false;
792 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
793 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000794 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matthew Jacksond6a9dee2019-07-22 13:53:24 +0100795 CHECKED_NON_NEGATIVE(options->depth_multiplier);
telsoa01c577f2c2018-08-31 09:22:23 +0100796
797 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
798 CHECK_VALID_SIZE(inputs.size(), 2, 3);
799 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
800 CHECK_VALID_SIZE(outputs.size(), 1);
Pablo Tellof0bd6832019-04-26 17:58:13 +0100801 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
802 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000803
telsoa01c577f2c2018-08-31 09:22:23 +0100804 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
805 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
806
Matteo Martincigh747ef822018-12-18 09:26:39 +0000807 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +0100808 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
809 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000810
811 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +0100812 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
813 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
814
Matteo Martincigh747ef822018-12-18 09:26:39 +0000815 // Reshape weights as [ H, W, I, M ]
816 filterTensorInfo.SetShape({ filterHeight,
817 filterWidth,
818 inputTensorInfo.GetShape()[3],
819 filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });
820
821 // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
822 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
823
Pablo Tellof0bd6832019-04-26 17:58:13 +0100824 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
825 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
826 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
827 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100828
Matteo Martincigh747ef822018-12-18 09:26:39 +0000829 auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100830 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100831 auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
832
833 if (inputs.size() == 3)
834 {
835 desc.m_BiasEnabled = true;
836 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000837 auto biasTensorAndData = CreateConstTensor(inputs[2],
838 biasTensorInfo,
839 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100840 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
841 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100842 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100843 layerName.c_str());
844 }
845 else
846 {
847 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
848 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100849 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100850 layerName.c_str());
851 }
852 BOOST_ASSERT(layer != nullptr);
853
telsoa01c577f2c2018-08-31 09:22:23 +0100854 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000855 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100856
857 // register the input connection slots for the layer, connections are made after all layers have been created
858 // only the tensors for the inputs are relevant, exclude the const tensors
859 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000860 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100861
jimfly01c25411c2018-11-14 17:47:22 +0000862 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100863 // register the output connection slots for the layer, connections are made after all layers have been created
864 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
865 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
866}
867
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100868void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
869{
870 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
871
872 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
873 const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();
874
875 TransposeConvolution2dDescriptor desc;
876 desc.m_BiasEnabled = false;
877 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
878 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
879 desc.m_DataLayout = armnn::DataLayout::NHWC;
880
881 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Matthew Jacksonccb25ea2019-08-20 17:18:33 +0100882 CHECK_VALID_SIZE(inputs.size(), 3);
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100883
884 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
885 CHECK_VALID_SIZE(outputs.size(), 1);
886
Matthew Jacksonccb25ea2019-08-20 17:18:33 +0100887 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100888 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
889
890 // TfLite uses NHWC tensors
891 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
892 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
893
894 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
895 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
896
897 CalcPadding(inputHeight,
898 filterHeight,
899 desc.m_StrideY,
900 1, // DilationY
901 desc.m_PadTop,
902 desc.m_PadBottom,
903 options->padding);
904
905 CalcPadding(inputWidth,
906 filterWidth,
907 desc.m_StrideX,
908 1, // DilationX
909 desc.m_PadLeft,
910 desc.m_PadRight,
911 options->padding);
912
913 auto filterTensorAndData = CreateConstTensor(inputs[1],
914 filterTensorInfo,
915 armnn::Optional<armnn::PermutationVector&>());
916
917 armnn::IConnectableLayer* layer = nullptr;
918 auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);
919
Matthew Jacksonccb25ea2019-08-20 17:18:33 +0100920 layer = m_Network->AddTransposeConvolution2dLayer(desc,
921 filterTensorAndData.first,
922 EmptyOptional(),
923 layerName.c_str());
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100924
925 BOOST_ASSERT(layer != nullptr);
926
927 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
928 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
929
930 // only the tensors for the inputs are relevant, exclude the const (filter) tensor
931 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Matthew Jacksonccb25ea2019-08-20 17:18:33 +0100932 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100933
934 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
935 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
936}
937
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +0100938void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
939{
940 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
941}
942
Bruno Goncalvesdb947e22019-02-08 18:52:21 -0200943void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
944{
945 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
946
947 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
948 CHECK_VALID_SIZE(inputs.size(), 3);
949
950 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
951 CHECK_VALID_SIZE(outputs.size(), 1);
952
953 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
954 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
955
956 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
957 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
958
959 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
960 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
961
962 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
963 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
964
965 size_t step = 2;
966 std::vector<std::pair<unsigned int, unsigned int>> crops;
967 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
968 {
969 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
970 }
971
972 armnn::BatchToSpaceNdDescriptor desc;
973 desc.m_BlockShape = blockShape;
974 desc.m_Crops = crops;
975 desc.m_DataLayout = armnn::DataLayout::NHWC;
976
977 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
978
979 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
980 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
981
982 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
983
984 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
985 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
986
987 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
988 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
989}
990
Matthew Jackson28c94572019-07-18 10:47:03 +0100991void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
992{
993 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
994
995 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
996 CHECK_VALID_SIZE(inputs.size(), 1);
997
998 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
999 CHECK_VALID_SIZE(outputs.size(), 1);
1000
1001 L2NormalizationDescriptor desc;
1002 desc.m_DataLayout = armnn::DataLayout::NHWC;
1003 auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1004 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1005
1006 BOOST_ASSERT(layer != nullptr);
1007
1008 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1009 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1010
1011 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1012 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1013
1014 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1015 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1016}
1017
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001018void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
1019{
1020 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
1021}
1022
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001023void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1024{
1025 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1026
1027 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1028 CHECK_VALID_SIZE(inputs.size(), 2);
1029
1030 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1031 CHECK_VALID_SIZE(outputs.size(), 1);
1032
1033 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1034 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1035
1036 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1037 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1038
1039 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1040 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1041
1042 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1043 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1044 {
1045 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1046 }
1047 else
1048 {
1049 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1050 }
1051
1052 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1053 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1054}
1055
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001056void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1057{
1058 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1059
1060 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1061 CHECK_VALID_SIZE(inputs.size(), 2);
1062
1063 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1064 CHECK_VALID_SIZE(outputs.size(), 1);
1065
1066 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1067 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1068
1069 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1070 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1071
1072 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1073 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1074
1075 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1076 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1077 {
1078 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1079 }
1080 else
1081 {
1082 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1083 }
1084
1085 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1086 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1087}
1088
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001089void TfLiteParser::ParsePool(size_t subgraphIndex,
1090 size_t operatorIndex,
1091 PoolingAlgorithm algorithm)
1092{
1093 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1094
1095 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1096 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1097
1098 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1099
1100 std::string layerName;
1101
1102 switch (algorithm)
1103 {
1104 case PoolingAlgorithm::Average:
1105 layerName =
1106 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1107 break;
1108 case PoolingAlgorithm::Max:
1109 layerName =
1110 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1111 break;
1112 default:
1113 BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
1114 }
1115
1116 Pooling2dDescriptor desc;
1117
1118 desc.m_PoolType = algorithm;
1119 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1120 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1121 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1122 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1123 desc.m_PaddingMethod = PaddingMethod::Exclude;
1124 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001125 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001126
1127 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1128 CHECK_VALID_SIZE(inputs.size(), 1);
1129 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1130
1131 // assuming input is NHWC
1132 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1133 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1134
Pablo Tellof0bd6832019-04-26 17:58:13 +01001135 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1136 desc.m_PadTop, desc.m_PadBottom, options->padding);
1137 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1138 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001139
1140 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1141 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001142
1143 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1144
1145 BOOST_ASSERT(layer != nullptr);
1146
jimfly01c25411c2018-11-14 17:47:22 +00001147 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1148 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001149
1150 // register the input connection slots for the layer, connections are made after all layers have been created
1151 // only the tensors for the inputs are relevant, exclude the const tensors
1152 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001153 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001154
jimfly01c25411c2018-11-14 17:47:22 +00001155 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001156 // register the output connection slots for the layer, connections are made after all layers have been created
1157 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1158 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1159}
1160
telsoa01c577f2c2018-08-31 09:22:23 +01001161void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1162{
1163 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1164 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1165 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1166
1167 SoftmaxDescriptor desc;
1168 desc.m_Beta = options->beta;
1169
1170 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1171 CHECK_VALID_SIZE(inputs.size(), 1);
1172 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1173 CHECK_VALID_SIZE(outputs.size(), 1);
1174
1175 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1176 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1177
1178 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1179 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1180
1181 // register the input connection slots for the layer, connections are made after all layers have been created
1182 // only the tensors for the inputs are relevant, exclude the const tensors
1183 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1184 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1185
1186 // register the output connection slots for the layer, connections are made after all layers have been created
1187 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1188 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1189}
1190
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001191void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1192{
1193 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1194
1195 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1196 CHECK_VALID_SIZE(inputs.size(), 3);
1197
1198 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1199 CHECK_VALID_SIZE(outputs.size(), 1);
1200
1201 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1202 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1203
1204 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1205 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1206
1207 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1208 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1209
1210 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1211 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1212
1213 size_t step = 2;
1214 std::vector<std::pair<unsigned int, unsigned int>> padList;
1215 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1216 {
1217 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1218 }
1219
1220 armnn::SpaceToBatchNdDescriptor desc;
1221 desc.m_BlockShape = blockShape;
1222 desc.m_PadList = padList;
1223 desc.m_DataLayout = armnn::DataLayout::NHWC;
1224
1225 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1226
1227 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1228 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1229
1230 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1231
1232 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1233 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1234
1235 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1236 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1237}
1238
telsoa01c577f2c2018-08-31 09:22:23 +01001239armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1240 const armnn::TensorInfo & inputTensorInfo)
1241{
1242 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1243 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1244 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1245
1246 if (inputTensorInfo.GetNumDimensions() > 4)
1247 {
1248 std::stringstream ss;
1249 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1250 << " shape:" << inputTensorInfo.GetShape() << " "
1251 << CHECK_LOCATION().AsString();
1252 throw ParseException(ss.str());
1253 }
1254
1255 if (squeezeDims.empty())
1256 {
1257 squeezeDims.assign(dimensionSequence,
1258 dimensionSequence+inputTensorInfo.GetNumDimensions());
1259 }
1260
1261 std::vector<uint32_t> outputDims;
1262 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1263 {
1264 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1265 auto currentDimension = inputTensorInfo.GetShape()[i];
1266 if (skipSqueeze || currentDimension != 1)
1267 {
1268 outputDims.push_back(currentDimension);
1269 }
1270 }
1271
1272 if (outputDims.size() > 4)
1273 {
1274 std::stringstream ss;
1275 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1276 << " shape:" << inputTensorInfo.GetShape() << " "
1277 << CHECK_LOCATION().AsString();
1278 throw ParseException(ss.str());
1279 }
1280
1281 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1282 outputDims.data());
1283
1284 // we need to preserve the tensor type and the quantization data as well
1285 TensorInfo outTensorInfo = inputTensorInfo;
1286 outTensorInfo.SetShape(outShape);
1287
1288 return outTensorInfo;
1289}
1290
1291void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1292{
1293 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1294
1295 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1296 CHECK_VALID_SIZE(inputs.size(), 1);
1297
1298 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1299 CHECK_VALID_SIZE(outputs.size(), 1);
1300
1301 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1302 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1303
1304 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1305 armnn::TensorInfo outputTensorInfo =
1306 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1307 inputTensorInfo);
1308
1309 ReshapeDescriptor reshapeDesc;
1310 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1311
1312 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1313 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1314 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1315
1316 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1317 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1318
1319 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1320 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1321}
1322
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001323void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1324{
1325 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1326
1327 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1328 CHECK_VALID_SIZE(inputs.size(), 4);
1329
1330 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1331 CHECK_VALID_SIZE(outputs.size(), 1);
1332
1333 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1334 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1335
1336 StridedSliceDescriptor desc;
1337 desc.m_BeginMask = options->begin_mask;
1338 desc.m_EllipsisMask = options->ellipsis_mask;
1339 desc.m_EndMask = options->end_mask;
1340 desc.m_NewAxisMask = options->new_axis_mask;
1341 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1342 desc.m_DataLayout = armnn::DataLayout::NHWC;
1343
1344 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1345 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1346
1347 std::vector<int> begin(beginTensorInfo.GetNumElements());
1348 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1349
1350 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1351 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1352
1353 std::vector<int> end(endTensorInfo.GetNumElements());
1354 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1355
1356 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1357 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1358
1359 std::vector<int> stride(strideTensorInfo.GetNumElements());
1360 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1361
1362 desc.m_Begin = begin;
1363 desc.m_End = end;
1364 desc.m_Stride = stride;
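// Illustrative example (hypothetical values): for an input of shape [3, 2, 3]
// with begin = { 1, 0, 0 }, end = { 2, 2, 3 }, stride = { 1, 1, 1 } and all
// masks left at zero, the slice keeps index 1 of the first dimension and the
// whole of the remaining dimensions, so the output shape is [1, 2, 3].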
1365
1366 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1367 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1368
1369 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1370 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1371
1372 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1373 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1374
1375 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1376 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1377}
1378
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001379void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1380{
1381 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1382
1383 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1384 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1385
1386 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1387 CHECK_VALID_SIZE(inputs.size(), 2);
1388
1389 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1390 CHECK_VALID_SIZE(outputs.size(), 1);
1391
1392 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1393 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1394
1395 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1396 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1397
1398 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1399 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1400
1401 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1402 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1403 {
1404 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1405 }
1406 else
1407 {
1408 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1409 }
1410
1411 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1412
1413 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1414 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1415}
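// Illustrative note on the broadcast path above (hypothetical shapes): when the
// two inputs of Sub (and likewise Add and Mul below) differ in rank, for example
// [1, 4, 4, 3] and [3], AddBroadcastReshapeLayer inserts a Reshape in front of
// the lower-rank input so that both inputs reach the element-wise layer with the
// same number of dimensions (here [1, 1, 1, 3]); when the ranks already match,
// the two inputs are connected to the layer directly.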
1416
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001417void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1418{
1419 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1420
1421 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1422 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1423
1424 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1425 CHECK_VALID_SIZE(inputs.size(), 2);
1426
1427 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1428 CHECK_VALID_SIZE(outputs.size(), 1);
1429
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001430 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1431 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1432
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001433 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1434 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1435
1436 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1437 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1438
1439 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001440 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1441 {
1442 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1443 }
1444 else
1445 {
1446 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1447 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001448
1449 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1450
1451 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1452 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1453}
1454
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001455void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1456{
1457 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1458
1459 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1460 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1461
1462 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1463 CHECK_VALID_SIZE(inputs.size(), 2);
1464
1465 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1466 CHECK_VALID_SIZE(outputs.size(), 1);
1467
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001468 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1469 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1470
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001471 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1472 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1473
1474 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1475 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1476
1477 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001478 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1479 {
1480 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1481 }
1482 else
1483 {
1484 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1485 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001486
1487 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1488
1489 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1490 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1491}
1492
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001493void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1494{
1495 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1496
1497 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1498
1499 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1500 CHECK_VALID_SIZE(outputs.size(), 1);
1501
1502 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1503 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1504
1505 armnn::MeanDescriptor desc;
1506 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1507 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1508 desc.m_Axis = axis;
1509
1510 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1511 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1512
1513    desc.m_KeepDims =
1514        inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1516
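// Illustrative example (hypothetical shapes): reducing an input of shape
// [1, 2, 2, 3] over axis = { 1, 2 } yields an output of shape [1, 3] when
// keep_dims is false and [1, 1, 1, 3] when it is true, so comparing the input
// and output ranks above is how the parser recovers the keep_dims setting.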
1517 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1518 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1519
1520 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1521
1522 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1523 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1524
1525 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1526 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1527}
1528
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001529void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1530{
1531 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1532
1533 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1534
1535 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1536 CHECK_VALID_SIZE(outputs.size(), 1);
1537
1538 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1539 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1540
1541 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1542 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1543
1544 size_t step = 2;
1545 armnn::PadDescriptor desc;
1546 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1547 {
1548 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1549 }
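// Illustrative example (hypothetical values): a 4x2 paddings tensor flattened to
// { 0, 0, 1, 1, 2, 2, 0, 0 } is consumed in (before, after) pairs, giving
// m_PadList = { {0, 0}, {1, 1}, {2, 2}, {0, 0} }, i.e. one element of padding on
// each side of the second dimension and two on each side of the third.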
1550
1551 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1552 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1553
1554 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1555 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1556
1557 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1558 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1559
1560 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1561 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1562}
1563
Finn Williamsc42c3842019-01-22 14:18:11 +00001564
Sadik Armagan58f39192018-09-17 14:14:39 +01001565void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1566{
Finn Williamsc42c3842019-01-22 14:18:11 +00001567 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001568}
1569
1570void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1571{
Finn Williamsc42c3842019-01-22 14:18:11 +00001572 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1573}
Sadik Armagan58f39192018-09-17 14:14:39 +01001574
Finn Williamsc42c3842019-01-22 14:18:11 +00001575void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1576{
1577 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1578}
1579
Nina Drozd99851762019-04-09 09:37:38 +01001580void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1581{
1582 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1583}
1584
Finn Williamsc42c3842019-01-22 14:18:11 +00001585
1586void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1587{
1588 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001589 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1590 boost::ignore_unused(operatorPtr);
1591
1592 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1593 CHECK_VALID_SIZE(inputs.size(), 1);
1594
1595 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1596 CHECK_VALID_SIZE(outputs.size(), 1);
1597
Finn Williamsc42c3842019-01-22 14:18:11 +00001598    std::string layerName = "Activation:";
Sadik Armagan58f39192018-09-17 14:14:39 +01001599 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001600 activationDesc.m_Function = activationType;
1601
1602 switch (activationType)
1603 {
1604 case ActivationFunction::ReLu:
1605 {
1606 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1607 break;
1608 }
1609 case ActivationFunction::BoundedReLu:
1610 {
1611 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1612 activationDesc.m_A = 6.0f;
1613 activationDesc.m_B = 0.0f;
1614 break;
1615 }
1616 case ActivationFunction::Sigmoid:
1617 {
1618 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1619 break;
1620 }
Nina Drozd99851762019-04-09 09:37:38 +01001621 case ActivationFunction::TanH:
1622 {
1623 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1624 activationDesc.m_A = 1.0f;
1625 activationDesc.m_B = 1.0f;
1626 break;
1627 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001628 default:
1629 {
1630 throw ParseException(
1631 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1632 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1633 }
1634 }
1635
1636 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001637
1638 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1639 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1640
1641 // register the input connection slots for the layer, connections are made after all layers have been created
1642 // only the tensors for the inputs are relevant, exclude the const tensors
1643 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1644 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1645
1646 // register the output connection slots for the layer, connections are made after all layers have been created
1647 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1648 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1649}
Sadikb94967b2018-09-19 15:30:00 +01001650armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1651 const std::vector<int32_t> & targetDimsIn)
1652{
1653 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1654 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1655
1656 if (stretchDim != targetDimsIn.end())
1657 {
1658 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1659 {
1660 throw ParseException(
1661 boost::str(
1662 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1663 }
1664
1665 auto targetNumElements =
1666 boost::numeric_cast<unsigned int>(
1667 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1668
1669 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1670 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1671 }
1672
1673 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1674
1675 TensorInfo reshapeInfo = inputTensorInfo;
1676 reshapeInfo.SetShape(outputShape);
1677
1678 return reshapeInfo;
1679}
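// Illustrative sketch (hypothetical shapes): for an input of shape [2, 3, 4]
// (24 elements) and new_shape = { -1, 6 }, the -1 entry is the stretch
// dimension; targetNumElements evaluates to 6, so the stretch dimension becomes
// 24 / 6 = 4 and the returned TensorInfo has shape [4, 6] with the input's data
// type and quantization parameters.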
1680
1681void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1682{
1683 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1684
1685 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001686
1687 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1688 CHECK_VALID_SIZE(outputs.size(), 1);
1689
1690 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1691 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1692
1693 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001694 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1695 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001696 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1697
kevmay0171972a82018-12-17 14:28:03 +00001698 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001699 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1700 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001701 {
1702 std::stringstream ss;
1703 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001704 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001705 << " does not equal output shape "
1706 << actualOutputTensorInfo.GetShape()
1707 << ": "
1708 << CHECK_LOCATION().AsString();
1709 throw ParseException(ss.str());
1710 }
1711
Sadikb94967b2018-09-19 15:30:00 +01001712 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001713 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001714
1715 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1716 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001717 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001718
1719 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1720 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1721
1722 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1723 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1724}
1725
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001726void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1727{
1728 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1729
1730 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1731 CHECK_VALID_SIZE(inputs.size(), 2);
1732
1733 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1734 CHECK_VALID_SIZE(outputs.size(), 1);
1735
1736 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1737
1738 // Data for the parsed tensor args (size) must be stored locally.
1739 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1740
1741 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1742 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1743
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001744 ResizeDescriptor desc;
1745 desc.m_Method = armnn::ResizeMethod::Bilinear;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001746 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001747 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1748 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001749
1750 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001751 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001752
1753 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1754 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1755
1756 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1757 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1758
1759 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1760 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1761}
1762
Sadik Armagan479045b2018-10-01 11:51:37 +01001763void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1764{
1765 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1766
1767 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1768 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1769
1770 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1771
1772 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1773 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1774 CHECK_VALID_SIZE(outputs.size(), 1);
1775
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001776 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1777 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001778
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001779 const unsigned int concatDimInput = static_cast<unsigned int>(
1780 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
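// Illustrative note (hypothetical values): the modulo arithmetic above maps a
// negative TfLite axis onto a valid dimension index, e.g. axis = -1 on a rank-4
// input becomes (4 + (-1)) % 4 = 3, while a non-negative axis such as 1 is left
// unchanged, since (4 + 1) % 4 = 1.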
Sadik Armagan479045b2018-10-01 11:51:37 +01001781
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001782 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1783 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001784
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001785 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001786
1787 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1788 {
1789 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1790
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001791        // This sets up the concatDescriptor view origins
1792 armnnUtils::ProcessConcatInputTensorInfo(
1793 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001794 }
1795
1796 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01001797 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01001798
1799 BOOST_ASSERT(layer != nullptr);
1800
1801 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1802 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001803
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001804 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001805
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001806 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001807
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001808 // add fused activation layer
1809 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001810
Sadik Armagan479045b2018-10-01 11:51:37 +01001811 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1812 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1813}
1814
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001815void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1816{
1817 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1818
1819 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1820 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1821
1822 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1823
1824 FullyConnectedDescriptor desc;
1825 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001826 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001827
1828 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1829 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1830 CHECK_VALID_SIZE(outputs.size(), 1);
1831
1832 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1833
1834 // Fully Connected Layer accepts two dimensional weights input
1835 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1836 if (weightsDimension != 2)
1837 {
1838 throw ParseException(
1839 boost::str(
1840 boost::format(
1841 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1842 "Node %2%")
1843 % weightsDimension
1844 % CHECK_LOCATION().AsString()));
1845 }
1846
Matteo Martincigh747ef822018-12-18 09:26:39 +00001847 auto filterTensorAndData = CreateConstTensor(inputs[1],
1848 filterTensorInfo,
1849 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001850 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001851 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1852
1853 if (inputs.size() == 3)
1854 {
1855 desc.m_BiasEnabled = true;
1856 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001857 auto biasTensorAndData = CreateConstTensor(inputs[2],
1858 biasTensorInfo,
1859 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001860 layer = m_Network->AddFullyConnectedLayer(desc,
1861 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001862 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001863 layerName.c_str());
1864 }
1865 else
1866 {
1867 layer = m_Network->AddFullyConnectedLayer(desc,
1868 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001869 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001870 layerName.c_str());
1871 }
1872 BOOST_ASSERT(layer != nullptr);
1873
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001874 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1875
1876 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1877
1878 if (inputTensorInfo.GetNumDimensions() > 2)
1879 {
1880 // Add reshape to flatten to 2D [batch_size, input_size],
1881 // where "input_size" corresponds to the number of inputs to the layer,
1882 // matching the second dimension of weights,
1883 // and "batch_size" is calculated by dividing the number of elements by "input_size".
1884 std::vector<unsigned int> reshapedDimensions(2);
1885 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
1886 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
1887
1888 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
1889 {
1890 throw ParseException(
1891 boost::str(
1892 boost::format(
1893 "Failed to deduce input tensor shape from filter size %1%")
1894 % reshapedDimensions[1]
1895 % CHECK_LOCATION().AsString()));
1896 }
1897
1898 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
1899 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
1900
1901 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
1902        armnn::ReshapeDescriptor reshapeDescriptor;
1903        reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
1904        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, reshapeLayerName.c_str());
1905
1906 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
1907 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
1908
1909 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
1910 }
1911 else
1912 {
1913 // register the input connection slot for the layer
1914 // only the tensors for the inputs are relevant, exclude the const tensors
1915 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1916 }
1917
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001918 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1919 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1920
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001921 // we need to add the activation layer and fortunately we don't need to care about the data layout
1922 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1923 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001924
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001925 // register the output connection slots for the layer, connections are made after all layers have been created
1926 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1927 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1928}
1929
keidav011b3e2ea2019-02-21 10:07:37 +00001930void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1931{
1932 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1933
1934 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1935
1936 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1937 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1938 CHECK_VALID_SIZE(outputs.size(), 4);
1939
1940 // Obtain custom options from flexbuffers
1941 auto custom_options = operatorPtr->custom_options;
1942 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1943
1944 // Obtain descriptor information from tf lite
1945 DetectionPostProcessDescriptor desc;
1946 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1947 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1948 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1949 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1950 desc.m_NumClasses = m["num_classes"].AsUInt32();
1951 desc.m_ScaleH = m["h_scale"].AsFloat();
1952 desc.m_ScaleW = m["w_scale"].AsFloat();
1953 desc.m_ScaleX = m["x_scale"].AsFloat();
1954 desc.m_ScaleY = m["y_scale"].AsFloat();
1955
keidav0107d58c72019-02-26 11:57:39 +00001956 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00001957 {
keidav0107d58c72019-02-26 11:57:39 +00001958 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00001959 }
1960 if (!(m["detections_per_class"].IsNull()))
1961 {
1962 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1963 }
1964
1965 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1966 {
1967 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1968 "must be positive and less than or equal to 1.");
1969 }
1970
1971 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
1972 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
1973 armnn::Optional<armnn::PermutationVector&>());
1974
1975 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
1976 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
1977 layerName.c_str());
1978
1979 BOOST_ASSERT(layer != nullptr);
1980
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00001981 // The model does not specify the output shapes.
1982 // The output shapes are calculated from the max_detection and max_classes_per_detection.
1983 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
1984 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
1985 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
1986 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
1987 m_OverridenOutputShapes.push_back({ 1 });
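// Illustrative example (hypothetical values): with max_detections = 10 and
// max_classes_per_detection = 1, numDetectedBox is 10 and the overridden output
// shapes become [1, 10, 4] (boxes), [1, 10] (classes), [1, 10] (scores) and
// [1] (number of valid detections).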
1988
keidav011b3e2ea2019-02-21 10:07:37 +00001989 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
1990 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00001991 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00001992 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
1993 }
1994
1995 // Register the input connection slots for the layer, connections are made after all layers have been created
1996 // only the tensors for the inputs are relevant, exclude the const tensors
1997 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1998 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1999
2000 // Register the output connection slots for the layer, connections are made after all layers have been created
2001 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2002 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2003 outputTensorIndexes[1],
2004 outputTensorIndexes[2],
2005 outputTensorIndexes[3]});
2006}
2007
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002008/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2009void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2010{
2011 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2012
2013 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2014 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2015 CHECK_VALID_SIZE(outputs.size(), 1);
2016
2017 if (inputs.size() < 1)
2018 {
2019 throw ParseException("Pack must have at least one input.");
2020 }
2021
2022 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2023 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2024
2025 StackDescriptor desc;
2026 desc.m_Axis = static_cast<uint32_t>(options->axis);
2027 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2028
2029 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2030 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2031 desc.m_InputShape = inputTensorInfo.GetShape();
2032
2033 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2034 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2035
2036 BOOST_ASSERT(layer != nullptr);
2037
2038 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2039 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2040
2041 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2042 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2043
2044 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2045 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2046}
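// Illustrative sketch (hypothetical shapes): packing three inputs of shape
// [3, 4] along axis 0 is mapped to a Stack layer with m_Axis = 0,
// m_NumInputs = 3 and m_InputShape = [3, 4], producing an output of shape
// [3, 3, 4]; packing the same inputs along axis 2 would instead give [3, 4, 3].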
2047
Nina Drozd200e3802019-04-15 09:47:39 +01002048void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2049{
2050 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2051
2052 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2053 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2054
2055 // This unpackAxis indicates the axis to unpack
2056 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2057
2058 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2059 CHECK_VALID_SIZE(inputs.size(), 1);
2060
2061 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002062
2063 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2064 {
2065 throw ParseException(
2066 boost::str(
2067 boost::format(
2068 "The unpack axis: %1% cannot be greater than or equal to "
2069 "the number of input dimension %2% %3%")
2070 % unpackAxis
2071 % inputTensorInfo.GetNumDimensions()
2072 % CHECK_LOCATION().AsString()));
2073 }
2074
Nina Drozd200e3802019-04-15 09:47:39 +01002075 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2076 // If num is not defined, automatically infer from the length of the dimension axis.
2077 if(unpackNum == 0)
2078 {
2079 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2080 }
2081
2082 // If unpack number cannot be inferred and is still zero, throw ParseException.
2083 if(unpackNum == 0)
2084 {
2085 throw ParseException("Number to unpack must greater than zero.");
2086 }
2087
2088 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2089 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2090
2091 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2092 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2093
2094 // Add current input shape to unpackDimSizes
2095 for (unsigned int i = 0; i < inputDimSize; ++i)
2096 {
2097 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2098 }
2099
2100 if (unpackDimSizes[unpackAxis] != unpackNum)
2101 {
2102 throw ParseException("Number to unpack must be the same as length of the dimension to "
2103 "unpack along.");
2104 }
2105
2106 unpackDimSizes[unpackAxis] /= unpackNum;
2107
2108 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2109 for (unsigned int j = 0; j < unpackNum; ++j)
2110 {
2111 // Set the size of the views.
2112 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2113 {
2114 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2115 }
2116 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2117 }
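// Illustrative example (hypothetical shapes): unpacking an input of shape
// [4, 3, 6] along axis 1 with num = 3 configures the Splitter with three views,
// each of size [4, 1, 6] and with origins {0, 0, 0}, {0, 1, 0} and {0, 2, 0};
// the Reshape layers added below then drop the unpacked axis so that each
// output ends up with shape [4, 6].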
2118
2119 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2120 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2121
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002122 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2123 unpackDimSizes.data());
2124
Nina Drozd200e3802019-04-15 09:47:39 +01002125 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2126 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2127
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002128 // Reshape to remove unpacked dimension
2129 unsigned int reshapedNumDimensions = inputDimSize - 1;
2130 std::vector<unsigned int> reshapedDimensions(reshapedNumDimensions);
Nina Drozd200e3802019-04-15 09:47:39 +01002131
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002132 unsigned int reshapeIndex = 0;
2133 for (unsigned int i = 0; i < inputDimSize; ++i)
Nina Drozd200e3802019-04-15 09:47:39 +01002134 {
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002135 if (i == unpackAxis)
2136 {
2137 continue;
2138 }
2139 reshapedDimensions[reshapeIndex++] = unpackDimSizes[i];
Nina Drozd200e3802019-04-15 09:47:39 +01002140 }
2141
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002142 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2143 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2144 {
2145 armnn::TensorInfo reshapedTensorInfo = inputTensorInfo;
2146 reshapedTensorInfo.SetShape(armnn::TensorShape{ reshapedNumDimensions, reshapedDimensions.data() });
2147
2148 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2149 armnn::ReshapeDescriptor desc;
2150 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2151        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
2152
2153 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape, inputTensorInfo.GetDataType()));
2154 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2155
2156 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2157
2158 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2159 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2160 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2161 }
Nina Drozd200e3802019-04-15 09:47:39 +01002162}
2163
Nina Drozd0324f482019-04-08 10:52:10 +01002164void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2165{
2166 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2167
2168 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2169 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2170
2171 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2172
Nina Drozd200e3802019-04-15 09:47:39 +01002173 // If number of splits cannot be inferred and is zero, throw ParseException.
2174 if(numSplits == 0)
2175 {
2176 throw ParseException("Number to splits must greater than zero.");
2177 }
2178
Nina Drozd0324f482019-04-08 10:52:10 +01002179 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2180 CHECK_VALID_SIZE(inputs.size(), 2);
2181 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2182 CHECK_VALID_SIZE(outputs.size(), numSplits);
2183
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002184 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2185 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002186
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002187 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2188 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2189 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2190
2191 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2192 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002193
2194    // Armnn supports split only along the channel dimension, i.e. dimension 1 for NCHW or dimension 3 for NHWC.
2195 if (splitDim == 0 || splitDim == 2)
2196 {
2197 throw ParseException(
2198 boost::str(
2199 boost::format(
2200 "Dimension %1% for split is not supported by Armnn. %2%")
2201 % splitDim
2202 % CHECK_LOCATION().AsString()));
2203 }
2204
2205 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002206 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002207 {
2208 throw ParseException(
2209 boost::str(
2210 boost::format(
2211 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002212 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002213 % inputTensorInfo.GetNumDimensions()
2214 % MaxNumOfTensorDimensions
2215 % CHECK_LOCATION().AsString()));
2216 }
2217
2218 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2219
2220 // Add current input shape to splitterDimSizes
2221 for (unsigned int i = 0; i < inputDimSize; ++i)
2222 {
2223 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2224 }
2225
2226 if (splitterDimSizes[splitDim] % numSplits != 0)
2227 {
2228 throw ParseException("Number of splits must evenly divide the dimension");
2229 }
2230 splitterDimSizes[splitDim] /= numSplits;
2231
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002232 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002233 for (unsigned int j = 0; j < numSplits; ++j)
2234 {
2235 // Set the size of the views.
2236 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2237 {
2238 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2239 }
2240 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2241 }
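// Illustrative example (hypothetical shapes): splitting an input of shape
// [1, 4, 4, 6] into num_splits = 2 along dimension 3 gives
// splitterDimSizes = { 1, 4, 4, 3 } and two views with origins {0, 0, 0, 0} and
// {0, 0, 0, 3}, so each output of the Splitter has shape [1, 4, 4, 3].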
2242
2243 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2244 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2245
2246 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002247 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002248
2249 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2250 splitterDimSizes.data());
2251
2252 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2253 {
2254 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
2255 inputTensorInfo.GetDataType()));
2256 }
2257
2258 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2259 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2260}
2261
Sadik Armagan58f39192018-09-17 14:14:39 +01002262armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2263 unsigned int outputSlot,
2264 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002265{
2266 ActivationDescriptor activationDesc;
2267 std::string layerName = prevLayer->GetName();
2268
2269 switch(activationType)
2270 {
2271 case tflite::ActivationFunctionType_NONE:
2272 {
2273 // this is a no-op: return previous layer
2274 return prevLayer;
2275 }
2276 case tflite::ActivationFunctionType_RELU:
2277 {
2278 activationDesc.m_Function = ActivationFunction::ReLu;
2279 layerName += ":RELU";
2280 break;
2281 }
2282 case tflite::ActivationFunctionType_RELU6:
2283 {
2284 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2285 activationDesc.m_A = 6.0f;
2286 activationDesc.m_B = 0.0f;
2287 layerName += ":RELU6";
2288 break;
2289 }
2290 case tflite::ActivationFunctionType_TANH:
2291 {
2292 activationDesc.m_Function = ActivationFunction::TanH;
2293 activationDesc.m_A = 1.0f;
2294 activationDesc.m_B = 1.0f;
2295 layerName += ":TANH";
2296 break;
2297 }
2298
2299        // These are listed here as a reminder of the other fused activations we could support
2300 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2301 case tflite::ActivationFunctionType_SIGN_BIT:
2302 default:
2303 {
2304 throw ParseException(
2305 boost::str(
2306 boost::format("TfLite parser doesn't suppport fused activation: "
2307 "%1%/%2% %3% ") %
2308 activationType %
2309 tflite::EnumNameActivationFunctionType(activationType) %
2310 CHECK_LOCATION().AsString()));
2311
2312 }
2313 }
2314
2315 IConnectableLayer* activationLayer =
2316 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2317
2318 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2319 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2320 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2321 return activationLayer;
2322}
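// Illustrative note (hypothetical operator): for an Add whose TfLite options
// carry fused_activation_function = RELU6, AddFusedActivationLayer returns a
// new BoundedReLu Activation layer (m_A = 6, m_B = 0) connected to output slot
// 0 of the addition, and callers register that activation layer as the producer
// of the operator's output tensor; with ActivationFunctionType_NONE the
// original layer is returned unchanged.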
2323
2324TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2325{
2326 if (fileName == nullptr)
2327 {
2328 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2329 CHECK_LOCATION().AsString()));
2330 }
2331 boost::system::error_code errorCode;
2332 boost::filesystem::path pathToFile(fileName);
2333 if (!boost::filesystem::exists(pathToFile, errorCode))
2334 {
2335 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2336 fileName %
2337 errorCode %
2338 CHECK_LOCATION().AsString()));
2339 }
2340 std::ifstream file(fileName, std::ios::binary);
2341 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2342 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2343 fileContent.size());
2344}
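// Minimal usage sketch (hypothetical path): the returned ModelPtr owns the
// unpacked flatbuffer model and can be inspected directly, e.g.
//
//     TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile("/tmp/mobilenet_v1.tflite");
//     size_t numSubgraphs = model->subgraphs.size();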
2345
2346TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2347{
2348 if (binaryContent == nullptr)
2349 {
2350 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2351 CHECK_LOCATION().AsString()));
2352 }
2353 flatbuffers::Verifier verifier(binaryContent, len);
2354 if (verifier.VerifyBuffer<tflite::Model>() == false)
2355 {
2356 throw ParseException(
2357 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2358 "flatbuffers format. size:%1% %2%") %
2359 len %
2360 CHECK_LOCATION().AsString()));
2361 }
2362 return tflite::UnPackModel(binaryContent);
2363}
2364
2365TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2366 size_t subgraphIndex,
2367 size_t operatorIndex)
2368{
2369 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2370
Derek Lambertiff05cc52019-04-26 13:05:17 +01002371 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2372 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002373
2374 size_t inputCount = operatorPtr->inputs.size();
2375 TensorRawPtrVector result(inputCount);
2376 for (size_t i=0; i<inputCount; ++i)
2377 {
2378 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002379 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002380 }
2381 return result;
2382}
2383
2384TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2385 size_t subgraphIndex,
2386 size_t operatorIndex)
2387{
2388 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2389
Derek Lambertiff05cc52019-04-26 13:05:17 +01002390 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2391 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002392
2393 size_t outputCount = operatorPtr->outputs.size();
2394 TensorRawPtrVector result(outputCount);
2395 for (size_t i=0; i<outputCount; ++i)
2396 {
2397 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2398 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002399 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002400 }
2401 return result;
2402}
2403
2404TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2405 size_t subgraphIndex)
2406{
2407 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002408 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002409
Derek Lambertiff05cc52019-04-26 13:05:17 +01002410 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002411 TensorIdRawPtrVector result(inputCount);
2412 for (size_t i=0; i<inputCount; ++i)
2413 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002414 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002415 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002416 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002417 }
2418 return result;
2419}
2420
2421TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2422 size_t subgraphIndex)
2423{
2424 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002425 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002426
Derek Lambertiff05cc52019-04-26 13:05:17 +01002427 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002428 TensorIdRawPtrVector result(outputCount);
2429 for (size_t i=0; i<outputCount; ++i)
2430 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002431 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2432 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002433 }
2434 return result;
2435}
2436
2437std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2438 size_t subgraphIndex,
2439 size_t operatorIndex)
2440{
2441 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002442 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2443 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002444 return operatorPtr->inputs;
2445}
2446
2447std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2448 size_t subgraphIndex,
2449 size_t operatorIndex)
2450{
2451 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002452 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2453 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002454 return operatorPtr->outputs;
2455}
2456
void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
                                      size_t operatorIndex,
                                      IConnectableLayer* layer,
                                      const std::vector<unsigned int>& tensorIndexes)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);
    if (tensorIndexes.size() != layer->GetNumInputSlots())
    {
        throw ParseException(
            boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
                                     " for subgraph:%3% operator index:%4% %5%") %
                       tensorIndexes.size() %
                       layer->GetNumInputSlots() %
                       subgraphIndex %
                       operatorIndex %
                       CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
    {
        unsigned int tensorIndex = tensorIndexes[slotIndex];
        armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
        RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}

void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
                                       size_t operatorIndex,
                                       IConnectableLayer* layer,
                                       const std::vector<unsigned int>& tensorIndexes)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);
    if (tensorIndexes.size() != layer->GetNumOutputSlots())
    {
        throw ParseException(
            boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
                                     " for subgraph:%3% operator index:%4% %5%") %
                       tensorIndexes.size() %
                       layer->GetNumOutputSlots() %
                       subgraphIndex %
                       operatorIndex %
                       CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
    {
        unsigned int tensorIndex = tensorIndexes[slotIndex];
        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
        RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}
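
// Typical call pattern from an operator parse function (a minimal sketch rather
// than a verbatim excerpt; 'layer' is the IConnectableLayer created for the
// operator and a single input/output slot is assumed):
//
//     auto inputTensorIndexes  = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
//     RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
//
//     auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
//     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
//
// RegisterConsumerOfTensor/RegisterProducerOfTensor record the slots in
// m_SubgraphConnections so that producers and consumers can be wired together
// once the whole subgraph has been parsed.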

void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
    for (auto const & tensorIdAndPtr : inputs)
    {
        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
        IConnectableLayer* layer =
            m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());

        auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
        layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            layer,
                            { static_cast<uint32_t>(tensorIdAndPtr.first) });
    }
}

void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
    for (auto const & tensorIdAndPtr : outputs)
    {
        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
        IConnectableLayer* layer =
            m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());

        RegisterInputSlots(subgraphIndex,
                           VIRTUAL_OPERATOR_ID,
                           layer,
                           { static_cast<uint32_t>(tensorIdAndPtr.first) });
    }
}

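// Adds a ConstantLayer for every tensor in the subgraph that has registered
// consumers but no producing layer, i.e. constant data (typically weights and
// biases) held in the model's buffers rather than computed by an operator.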
void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
    {
        if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
            m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
        {
            TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
            armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
            auto tensorAndData = CreateConstTensor(tensorPtr,
                                                   tensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());

            std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
            IConnectableLayer *layer =
                m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

            layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
            RegisterOutputSlots(subgraphIndex,
                                VIRTUAL_OPERATOR_ID,
                                layer,
                                { tensorIndex });
        }
    }
}

// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
{
    CHECK_BUFFER(model, bufferIndex);
    return model->buffers[bufferIndex].get();
}

template<typename T>
std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
                                            TfLiteParser::TensorRawPtr tensorPtr,
                                            armnn::TensorInfo& tensorInfo,
                                            armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    auto constData = CreateConstTensorImpl<T>(bufferPtr,
                                              tensorPtr,
                                              tensorInfo,
                                              permutationVector);
    TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
    return std::make_pair(constData.first, std::move(storage));
}

std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
                                armnn::TensorInfo& tensorInfo,
                                armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    CHECK_TENSOR_PTR(tensorPtr);
    auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
    CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);

    switch (tensorInfo.GetDataType())
    {
        case armnn::DataType::Float32:
            return CreateConstTensorAndStoreData<float>(bufferPtr,
                                                        tensorPtr,
                                                        tensorInfo,
                                                        permutationVector);
        case armnn::DataType::QuantisedAsymm8:
            return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        case armnn::DataType::Signed32:
            return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        default:
        {
            std::stringstream errString;
            errString << "Unexpected datatype when creating const tensor: "
                      << armnn::GetDataTypeName(tensorInfo.GetDataType())
                      << " shape:" << tensorInfo.GetShape()
                      << CHECK_LOCATION().AsString();
            throw ParseException(errString.str());
        }
    }
}
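
// example usage (a sketch mirroring how the operator parsers call this helper;
// 'inputs' stands for the operator's input tensors and the names are placeholders):
//
//     armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
//     auto filterTensorAndData = CreateConstTensor(inputs[1],
//                                                  filterTensorInfo,
//                                                  armnn::Optional<armnn::PermutationVector&>());
//
// The second element of the returned pair owns the backing memory referenced by
// the ConstTensor, so it must be kept alive until the tensor has been handed to
// the network.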

BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                          const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    for (auto const & input : inputs)
    {
        if (input.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
            return std::make_pair(bindingId, ToTensorInfo(input.second));
        }
    }

    std::stringstream bindings;
    for (auto const & input : inputs)
    {
        bindings << "'" << input.second->name << "' ";
    }

    throw ParseException(
        boost::str(
            boost::format("No input binding found for subgraph:%1% and name:%2%. "
                          "Possible inputs are: [%3%] %4%") %
            subgraphId %
            name %
            bindings.str() %
            CHECK_LOCATION().AsString()));
}

BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        auto const output = outputs[i];
        if (output.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
            std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
                                              m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
            return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
        }
    }

    std::stringstream bindings;
    for (auto const & output : outputs)
    {
        bindings << "'" << output.second->name << "' ";
    }

    throw ParseException(
        boost::str(
            boost::format("No output binding found for subgraph:%1% and name:%2%. "
                          "Possible outputs are: [%3%] %4%") %
            subgraphId %
            name %
            bindings.str() %
            CHECK_LOCATION().AsString()));
}
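
// example usage (a minimal sketch of how a caller consumes the binding information;
// 'runtime', 'networkId', 'inputData', 'outputData' and the tensor names are
// hypothetical and belong to the calling application):
//
//     BindingPointInfo inputBinding  = parser->GetNetworkInputBindingInfo(0, "input");
//     BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(0, "output");
//
//     armnn::InputTensors inputTensors
//     {
//         { inputBinding.first, armnn::ConstTensor(inputBinding.second, inputData.data()) }
//     };
//     armnn::OutputTensors outputTensors
//     {
//         { outputBinding.first, armnn::Tensor(outputBinding.second, outputData.data()) }
//     };
//     runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);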

size_t TfLiteParser::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}

std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(inputs.size());
    for (auto const & input : inputs)
    {
        result.push_back(input.second->name);
    }
    return result;
}

std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(outputs.size());
    for (auto const & output : outputs)
    {
        result.push_back(output.second->name);
    }
    return result;
}
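
// example usage (sketch): listing the bindable tensors of every subgraph; 'parser'
// is assumed to be an ITfLiteParserPtr that has already loaded a model:
//
//     for (size_t i = 0; i < parser->GetSubgraphCount(); ++i)
//     {
//         for (const std::string& name : parser->GetSubgraphInputTensorNames(i))
//         {
//             std::cout << "subgraph " << i << " input:  " << name << std::endl;
//         }
//         for (const std::string& name : parser->GetSubgraphOutputTensorNames(i))
//         {
//             std::cout << "subgraph " << i << " output: " << name << std::endl;
//         }
//     }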

ITfLiteParser* ITfLiteParser::CreateRaw()
{
    return new TfLiteParser();
}

ITfLiteParserPtr ITfLiteParser::Create()
{
    return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
}

void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
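
// End-to-end usage (a minimal sketch, assuming a valid "model.tflite" on disk and
// the CpuRef backend being available; error handling omitted):
//
//     using namespace armnnTfLiteParser;
//     ITfLiteParserPtr parser = ITfLiteParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*network, { armnn::Compute::CpuRef }, runtime->GetDeviceSpec());
//
//     armnn::NetworkId networkId;
//     runtime->LoadNetwork(networkId, std::move(optNet));
//     // Bind tensors via GetNetworkInputBindingInfo/GetNetworkOutputBindingInfo and
//     // run inference with runtime->EnqueueWorkload(...).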

TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int32Data(nullptr)
{
}

TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int32Data(nullptr)
{
}

TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int32Data(std::move(data))
{
}

} // armnnTfLiteParser