//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TfLiteParser.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>

// armnnUtils:
#include <ParserHelper.hpp>
#include <Permute.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/cast.hpp>

#include <fstream>
#include <algorithm>
#include <limits>
#include <numeric>
#include <flatbuffers/flexbuffers.h>

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{
namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                              location.m_Function %
                              subgraphIndex %
                              location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                              location.m_Function %
                              subgraphIndex %
                              location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParser::ModelPtr & model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid operator index. "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // Not checking the model here, because CHECK_MODEL is assumed to have run already
    // and validated it. An assert is sufficient.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // The subgraph index should also have been checked by CHECK_MODEL, so
    // only an assert is added here.
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // The tensor index is the only thing left to check here.
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              tensorIndex %
                              location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
                    const CheckLocation & location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with a null tensor pointer. "
                              "at %2%") %
                              location.m_Function %
                              location.FileLine()));
    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParser::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "buffer:%2% at %3%") %
                              location.m_Function %
                              bufferIndex %
                              location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid buffer index. "
                              "buffer index:%2% at %3%") %
                              location.m_Function %
                              bufferIndex %
                              location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("The buffer #%1% is null. %2%") %
                              bufferIndex %
                              location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                              bufferId %
                              location.AsString()));
    }
    else if (tensorInfo.GetNumElements() > bufferPtr->data.size() ||
             tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch (activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                                  OPTION->fused_activation_function % \
                                  tflite::EnumNameActivationFunctionType(\
                                      OPTION->fused_activation_function) % \
                                  __func__ % \
                                  SUBGRAPH_INDEX % \
                                  OPERATOR_INDEX % \
                                  CHECK_LOCATION().FileLine())); \
        } \
    } while(false)


std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto & i : in)
    {
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}

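// Computes the front/back padding for one dimension. For tflite::Padding_SAME the output size is
// ceil(inputSize / stride); the total padding required to reach it (accounting for dilation) is
// split as evenly as possible, with any odd remainder added at the back. Padding_VALID adds none.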
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}

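// Builds an armnn::TensorInfo from a TfLite tensor description: maps the TfLite data type onto
// the corresponding ArmNN type and carries over the per-tensor quantization scale and zero point
// when present. An empty shape is promoted to { 1 } so scalars become one-element tensors.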
armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                                  tensorPtr->type %
                                  tflite::EnumNameTensorType(tensorPtr->type) %
                                  tensorPtr->name %
                                  location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32,
            //       but this is what ArmNN supports at the moment.
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    std::vector<unsigned int> safeShape = shapes;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
    }

    // Two statements (on purpose) for easier debugging: construct the TensorInfo, then return it.
    armnn::TensorInfo result(static_cast<unsigned int>(safeShape.size()),
                             safeShape.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions);
}

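// Copies the data of a constant TfLite tensor into newly allocated storage and wraps it in an
// armnn::ConstTensor. When a permutation vector is supplied, both the tensor info and the data
// are permuted (used to convert TfLite weight layouts into the layout ArmNN expects).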
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // Generate the binding id by shifting the tensor id left by 8 bits and adding the
    // subgraph id, which allows up to 256 subgraphs.
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}

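// Returns true if the ArmNN tensor shape matches the (signed) shape stored in the TfLite model,
// dimension by dimension.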
bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

} // <anonymous>

TfLiteParser::TfLiteParser()
: m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
}

void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}

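// Inserts a Reshape layer in front of the lower-rank input of a broadcastable binary operator so
// that both inputs end up with the same number of dimensions: the smaller shape is left-padded
// with 1s, and the reshaped input is wired into slot 0 of the given layer.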
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward(reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}

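// Typical usage, as a minimal sketch (the parser handle name and file path below are
// placeholders, not part of this file):
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");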
INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}

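// Walks the single supported subgraph of the loaded FlatBuffer model: dispatches every operator
// to its registered parse function (collecting per-operator errors), sets up input, output and
// constant layers, and finally connects the recorded producer output slots to their consumers.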
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                              m_Model->subgraphs.size() %
                              CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubgraphPtr const & subgraph : m_Model->subgraphs)
    {
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                                          builtinCode %
                                          tflite::BuiltinOperator_MAX %
                                          subgraphIndex %
                                          operatorIndex %
                                          CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
            boost::format("Another layer has already registered itself as the producer of "
                          "subgraph:%1% tensor:%2% %3%") %
                          subgraphIndex %
                          tensorIndex %
                          CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    throw ParseException(
        boost::str(
            boost::format("Operator not supported. "
                          "subgraph:%1% operator:%2% "
                          "opcode_index:%3% opcode:%4% / %5% %6%") %
                          subgraphIndex %
                          operatorIndex %
                          opcodeIndex %
                          opcode %
                          tflite::EnumNameBuiltinOperator(opcode) %
                          CHECK_LOCATION().AsString()));
}

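// Parses a CONV_2D operator: reads strides, dilation and padding from the TfLite options, creates
// an NHWC Convolution2d layer with the constant filter (and optional bias), and appends a fused
// activation layer if the operator requests one.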
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

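// Parses a DEPTHWISE_CONV_2D operator. TfLite stores depthwise weights as [1, H, W, I * M]; they
// are reshaped to [H, W, I, M] and permuted to the [M, I, H, W] layout ArmNN expects before the
// layer is created.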
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

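// Parses a TRANSPOSE operator. The optional second input is a constant tensor holding the
// permutation; when present it is copied out of the model buffer and used to build the
// PermuteDescriptor, otherwise a default-constructed descriptor is used.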
void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1, 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);

    PermuteDescriptor desc;

    if (inputs.size() == 2)
    {
        armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
        BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

        std::vector<unsigned int> permuteShape(permuteTensorInfo.GetNumElements());
        ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());

        PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());

        desc = PermuteDescriptor(permutationVector);
    }

    layer = m_Network->AddPermuteLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

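// Parses a TRANSPOSE_CONV operator. In the TfLite schema input 0 is the output shape tensor
// (not used here), input 1 is the filter and input 2 is the actual input, which is why the
// tensor info and slot registration below use index 2.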
void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);

    layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                      filterTensorAndData.first,
                                                      EmptyOptional(),
                                                      layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}

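// Parses a BATCH_TO_SPACE_ND operator. Block shape and crops are constant inputs; the crops are
// stored as flat [begin, end] pairs, hence the stride-2 loop below.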
void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }

    armnn::BatchToSpaceNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_Crops = crops;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}

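// Parses a MAXIMUM operator. If the two inputs differ in rank, a broadcast Reshape layer is
// inserted in front of the lower-rank input (see AddBroadcastReshapeLayer); otherwise both inputs
// are registered directly. ParseMinimum below follows the same pattern.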
void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

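// Shared implementation behind ParseAveragePool2D and ParseMaxPool2D: fills a Pooling2dDescriptor
// in NHWC layout, computes SAME/VALID padding from the pool size and stride, and appends a fused
// activation layer if the operator requests one.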
void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();

    SoftmaxDescriptor desc;
    desc.m_Beta = options->beta;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

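// Computes the output shape of a SQUEEZE operation: dimensions listed in squeezeDimsIn (or every
// dimension when the list is empty) are removed only if their size is 1. The data type and
// quantization parameters of the input are preserved.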
telsoa01c577f2c2018-08-31 09:22:23 +01001282armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1283 const armnn::TensorInfo & inputTensorInfo)
1284{
1285 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1286 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1287 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1288
1289 if (inputTensorInfo.GetNumDimensions() > 4)
1290 {
1291 std::stringstream ss;
1292 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1293 << " shape:" << inputTensorInfo.GetShape() << " "
1294 << CHECK_LOCATION().AsString();
1295 throw ParseException(ss.str());
1296 }
1297
1298 if (squeezeDims.empty())
1299 {
1300 squeezeDims.assign(dimensionSequence,
1301 dimensionSequence+inputTensorInfo.GetNumDimensions());
1302 }
1303
1304 std::vector<uint32_t> outputDims;
1305 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1306 {
1307 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1308 auto currentDimension = inputTensorInfo.GetShape()[i];
1309 if (skipSqueeze || currentDimension != 1)
1310 {
1311 outputDims.push_back(currentDimension);
1312 }
1313 }
1314
1315 if (outputDims.size() > 4)
1316 {
1317 std::stringstream ss;
1318 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1319 << " shape:" << inputTensorInfo.GetShape() << " "
1320 << CHECK_LOCATION().AsString();
1321 throw ParseException(ss.str());
1322 }
1323
1324 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1325 outputDims.data());
1326
1327 // we need to preserve the tensor type and the quantization data as well
1328 TensorInfo outTensorInfo = inputTensorInfo;
1329 outTensorInfo.SetShape(outShape);
1330
1331 return outTensorInfo;
1332}
1333
1334void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1335{
1336 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1337
1338 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1339 CHECK_VALID_SIZE(inputs.size(), 1);
1340
1341 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1342 CHECK_VALID_SIZE(outputs.size(), 1);
1343
1344 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1345 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1346
1347 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1348 armnn::TensorInfo outputTensorInfo =
1349 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1350 inputTensorInfo);
1351
1352 ReshapeDescriptor reshapeDesc;
1353 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1354
1355 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1356 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1357 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1358
1359 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1360 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1361
1362 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1363 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1364}
1365
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001366void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1367{
1368 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1369
1370 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1371 CHECK_VALID_SIZE(inputs.size(), 4);
1372
1373 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1374 CHECK_VALID_SIZE(outputs.size(), 1);
1375
1376 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1377 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1378
1379 StridedSliceDescriptor desc;
1380 desc.m_BeginMask = options->begin_mask;
1381 desc.m_EllipsisMask = options->ellipsis_mask;
1382 desc.m_EndMask = options->end_mask;
1383 desc.m_NewAxisMask = options->new_axis_mask;
1384 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1385 desc.m_DataLayout = armnn::DataLayout::NHWC;
1386
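    // inputs[1..3] hold the begin, end and stride vectors as constant tensors; their values are
    // copied out of the flatbuffer buffers and stored in the descriptor.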
1387 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1388 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1389
1390 std::vector<int> begin(beginTensorInfo.GetNumElements());
1391 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1392
1393 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1394 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1395
1396 std::vector<int> end(endTensorInfo.GetNumElements());
1397 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1398
1399 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1400 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1401
1402 std::vector<int> stride(strideTensorInfo.GetNumElements());
1403 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1404
1405 desc.m_Begin = begin;
1406 desc.m_End = end;
1407 desc.m_Stride = stride;
1408
1409 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1410 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1411
1412 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1413 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1414
1415 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1416 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1417
1418 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1419 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1420}
1421
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001422void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1423{
1424 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1425
1426 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1427 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1428
1429 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1430 CHECK_VALID_SIZE(inputs.size(), 2);
1431
1432 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1433 CHECK_VALID_SIZE(outputs.size(), 1);
1434
1435 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1436 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1437
1438 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1439 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1440
1441 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1442 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1443
1444 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
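    // If the two inputs have different ranks, insert a reshape so the lower-rank input can be
    // broadcast; otherwise connect both inputs to the subtraction layer directly.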
1445 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1446 {
1447 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1448 }
1449 else
1450 {
1451 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1452 }
1453
1454 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1455
1456 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1457 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1458}
1459
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001460void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1461{
1462 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1463
1464 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1465 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1466
1467 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1468 CHECK_VALID_SIZE(inputs.size(), 2);
1469
1470 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1471 CHECK_VALID_SIZE(outputs.size(), 1);
1472
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001473 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1474 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1475
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001476 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1477 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1478
1479 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1480 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1481
1482 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001483 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1484 {
1485 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1486 }
1487 else
1488 {
1489 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1490 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001491
1492 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1493
1494 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1495 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1496}
1497
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001498void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1499{
1500 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1501
1502 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1503 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1504
1505 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1506 CHECK_VALID_SIZE(inputs.size(), 2);
1507
1508 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1509 CHECK_VALID_SIZE(outputs.size(), 1);
1510
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001511 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1512 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1513
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001514 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1515 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1516
1517 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1518 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1519
1520 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001521 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1522 {
1523 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1524 }
1525 else
1526 {
1527 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1528 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001529
1530 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1531
1532 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1533 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1534}
1535
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001536void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1537{
1538 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1539
1540 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1541
1542 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1543 CHECK_VALID_SIZE(outputs.size(), 1);
1544
1545 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1546 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1547
1548 armnn::MeanDescriptor desc;
1549 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1550 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1551 desc.m_Axis = axis;
1552
1553 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1554 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1555
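    // Infer keep_dims from the ranks: the reduction kept its dimensions if the output rank
    // matches the input rank.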
1556    desc.m_KeepDims =
1557        inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1559
1560 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1561 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1562
1563 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1564
1565 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1566 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1567
1568 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1569 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1570}
1571
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001572void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1573{
1574 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1575
1576 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1577
1578 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1579 CHECK_VALID_SIZE(outputs.size(), 1);
1580
1581 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1582 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1583
1584 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1585 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1586
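    // The paddings tensor is a flattened [rank, 2] array: each consecutive pair gives the padding
    // to add before and after the corresponding dimension.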
1587 size_t step = 2;
1588 armnn::PadDescriptor desc;
1589 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1590 {
1591 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1592 }
1593
1594 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1595 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1596
1597 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1598 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1599
1600 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1601 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1602
1603 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1604 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1605}
1606
Finn Williamsc42c3842019-01-22 14:18:11 +00001607
Sadik Armagan58f39192018-09-17 14:14:39 +01001608void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1609{
Finn Williamsc42c3842019-01-22 14:18:11 +00001610 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001611}
1612
1613void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1614{
Finn Williamsc42c3842019-01-22 14:18:11 +00001615 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1616}
Sadik Armagan58f39192018-09-17 14:14:39 +01001617
Finn Williamsc42c3842019-01-22 14:18:11 +00001618void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1619{
1620 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1621}
1622
Nina Drozd99851762019-04-09 09:37:38 +01001623void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1624{
1625 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1626}
1627
Finn Williamsc42c3842019-01-22 14:18:11 +00001628
1629void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1630{
1631 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001632 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1633 boost::ignore_unused(operatorPtr);
1634
1635 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1636 CHECK_VALID_SIZE(inputs.size(), 1);
1637
1638 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1639 CHECK_VALID_SIZE(outputs.size(), 1);
1640
Finn Williamsc42c3842019-01-22 14:18:11 +00001641 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001642 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001643 activationDesc.m_Function = activationType;
1644
1645 switch (activationType)
1646 {
1647 case ActivationFunction::ReLu:
1648 {
1649 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1650 break;
1651 }
1652 case ActivationFunction::BoundedReLu:
1653 {
1654 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1655 activationDesc.m_A = 6.0f;
1656 activationDesc.m_B = 0.0f;
1657 break;
1658 }
1659 case ActivationFunction::Sigmoid:
1660 {
1661 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1662 break;
1663 }
Nina Drozd99851762019-04-09 09:37:38 +01001664 case ActivationFunction::TanH:
1665 {
1666 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1667 activationDesc.m_A = 1.0f;
1668 activationDesc.m_B = 1.0f;
1669 break;
1670 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001671 default:
1672 {
1673 throw ParseException(
1674 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1675 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1676 }
1677 }
1678
1679 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001680
1681 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1682 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1683
1684 // register the input connection slots for the layer, connections are made after all layers have been created
1685 // only the tensors for the inputs are relevant, exclude the const tensors
1686 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1687 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1688
1689 // register the output connection slots for the layer, connections are made after all layers have been created
1690 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1691 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1692}
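
// Computes the output shape for a Reshape. A single -1 entry in targetDimsIn acts as a stretch
// dimension whose size is inferred from the total number of input elements, e.g. reshaping
// 12 elements with target shape {3, -1} yields [3, 4].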
Sadikb94967b2018-09-19 15:30:00 +01001693armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1694 const std::vector<int32_t> & targetDimsIn)
1695{
1696 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1697 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1698
1699 if (stretchDim != targetDimsIn.end())
1700 {
1701 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1702 {
1703 throw ParseException(
1704 boost::str(
1705 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1706 }
1707
1708 auto targetNumElements =
1709 boost::numeric_cast<unsigned int>(
1710 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1711
1712 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1713 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1714 }
1715
1716 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1717
1718 TensorInfo reshapeInfo = inputTensorInfo;
1719 reshapeInfo.SetShape(outputShape);
1720
1721 return reshapeInfo;
1722}
1723
1724void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1725{
1726 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1727
1728 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001729
1730 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1731 CHECK_VALID_SIZE(outputs.size(), 1);
1732
1733 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1734 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1735
1736 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001737 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1738 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001739 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1740
kevmay0171972a82018-12-17 14:28:03 +00001741 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001742 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1743 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001744 {
1745 std::stringstream ss;
1746 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001747 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001748 << " does not equal output shape "
1749 << actualOutputTensorInfo.GetShape()
1750 << ": "
1751 << CHECK_LOCATION().AsString();
1752 throw ParseException(ss.str());
1753 }
1754
Sadikb94967b2018-09-19 15:30:00 +01001755 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001756 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001757
1758 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1759 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001760 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001761
1762 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1763 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1764
1765 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1766 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1767}
1768
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001769void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1770{
1771 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1772
1773 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1774 CHECK_VALID_SIZE(inputs.size(), 2);
1775
1776 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1777 CHECK_VALID_SIZE(outputs.size(), 1);
1778
1779 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1780
1781 // Data for the parsed tensor args (size) must be stored locally.
1782 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1783
1784 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1785 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1786
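    // The size tensor holds the new spatial dimensions in [height, width] order.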
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001787 ResizeDescriptor desc;
1788 desc.m_Method = armnn::ResizeMethod::Bilinear;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001789 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001790 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1791 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001792
1793 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001794 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001795
1796 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1797 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1798
1799 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1800 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1801
1802 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1803 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1804}
1805
Sadik Armagan479045b2018-10-01 11:51:37 +01001806void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1807{
1808 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1809
1810 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1811 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1812
1813 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1814
1815 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1816 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1817 CHECK_VALID_SIZE(outputs.size(), 1);
1818
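    // TfLite allows a negative concatenation axis; normalise it into the range [0, inputRank)
    // before building the OriginsDescriptor.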
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001819 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1820 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001821
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001822 const unsigned int concatDimInput = static_cast<unsigned int>(
1823 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01001824
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001825 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1826 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001827
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001828 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001829
1830 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1831 {
1832 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1833
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001834 // This sets up the concatDescriptor view origins
1835 armnnUtils::ProcessConcatInputTensorInfo(
1836 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001837 }
1838
1839 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01001840 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01001841
1842 BOOST_ASSERT(layer != nullptr);
1843
1844 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1845 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001846
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001847 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001848
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001849 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001850
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001851 // add fused activation layer
1852 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001853
Sadik Armagan479045b2018-10-01 11:51:37 +01001854 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1855 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1856}
1857
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001858void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1859{
1860 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1861
1862 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1863 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1864
1865 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1866
1867 FullyConnectedDescriptor desc;
1868 desc.m_BiasEnabled = false;
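    // TfLite stores fully connected weights as [num_units, input_size], so the weight matrix is
    // marked as transposed for ArmNN.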
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001869 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001870
1871 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1872 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1873 CHECK_VALID_SIZE(outputs.size(), 1);
1874
1875 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1876
1877 // Fully Connected Layer accepts two dimensional weights input
1878 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1879 if (weightsDimension != 2)
1880 {
1881 throw ParseException(
1882 boost::str(
1883 boost::format(
1884 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1885 "Node %2%")
1886 % weightsDimension
1887 % CHECK_LOCATION().AsString()));
1888 }
1889
Matteo Martincigh747ef822018-12-18 09:26:39 +00001890 auto filterTensorAndData = CreateConstTensor(inputs[1],
1891 filterTensorInfo,
1892 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001893 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001894 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1895
1896 if (inputs.size() == 3)
1897 {
1898 desc.m_BiasEnabled = true;
1899 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001900 auto biasTensorAndData = CreateConstTensor(inputs[2],
1901 biasTensorInfo,
1902 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001903 layer = m_Network->AddFullyConnectedLayer(desc,
1904 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001905 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001906 layerName.c_str());
1907 }
1908 else
1909 {
1910 layer = m_Network->AddFullyConnectedLayer(desc,
1911 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001912 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001913 layerName.c_str());
1914 }
1915 BOOST_ASSERT(layer != nullptr);
1916
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001917 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1918
1919 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1920
1921 if (inputTensorInfo.GetNumDimensions() > 2)
1922 {
1923 // Add reshape to flatten to 2D [batch_size, input_size],
1924 // where "input_size" corresponds to the number of inputs to the layer,
1925 // matching the second dimension of weights,
1926 // and "batch_size" is calculated by dividing the number of elements by "input_size".
1927 std::vector<unsigned int> reshapedDimensions(2);
1928 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
1929 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
1930
1931 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
1932 {
1933 throw ParseException(
1934 boost::str(
1935 boost::format(
1936 "Failed to deduce input tensor shape from filter size %1%")
1937 % reshapedDimensions[1]
1938 % CHECK_LOCATION().AsString()));
1939 }
1940
1941 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
1942 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
1943
1944 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
1945 armnn::ReshapeDescriptor desc;
1946 desc.m_TargetShape = reshapedTensorInfo.GetShape();
1947 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
1948
1949 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
1950 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
1951
1952 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
1953 }
1954 else
1955 {
1956 // register the input connection slot for the layer
1957 // only the tensors for the inputs are relevant, exclude the const tensors
1958 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1959 }
1960
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001961 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1962 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1963
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001964 // add the fused activation layer; it does not depend on the data layout
1965 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1966 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001967
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001968 // register the output connection slots for the layer, connections are made after all layers have been created
1969 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1970 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1971}
1972
keidav011b3e2ea2019-02-21 10:07:37 +00001973void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1974{
1975 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1976
1977 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1978
1979 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1980 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1981 CHECK_VALID_SIZE(outputs.size(), 4);
1982
1983 // Obtain custom options from flexbuffers
1984 auto custom_options = operatorPtr->custom_options;
1985 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1986
1987 // Obtain descriptor information from tf lite
1988 DetectionPostProcessDescriptor desc;
1989 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1990 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1991 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1992 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1993 desc.m_NumClasses = m["num_classes"].AsUInt32();
1994 desc.m_ScaleH = m["h_scale"].AsFloat();
1995 desc.m_ScaleW = m["w_scale"].AsFloat();
1996 desc.m_ScaleX = m["x_scale"].AsFloat();
1997 desc.m_ScaleY = m["y_scale"].AsFloat();
1998
keidav0107d58c72019-02-26 11:57:39 +00001999 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00002000 {
keidav0107d58c72019-02-26 11:57:39 +00002001 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00002002 }
2003 if (!(m["detections_per_class"].IsNull()))
2004 {
2005 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
2006 }
2007
2008 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
2009 {
2010 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
2011 "must be positive and less than or equal to 1.");
2012 }
2013
2014 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
2015 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
2016 armnn::Optional<armnn::PermutationVector&>());
2017
2018 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
2019 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
2020 layerName.c_str());
2021
2022 BOOST_ASSERT(layer != nullptr);
2023
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002024 // The model does not specify the output shapes.
2025 // The output shapes are calculated from the max_detection and max_classes_per_detection.
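    // Outputs, in order: detection boxes [1, numDetectedBox, 4], detection classes, detection
    // scores and the number of valid detections.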
2026 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
2027 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2028 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2029 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2030 m_OverridenOutputShapes.push_back({ 1 });
2031
keidav011b3e2ea2019-02-21 10:07:37 +00002032 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
2033 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002034 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00002035 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
2036 }
2037
2038 // Register the input connection slots for the layer, connections are made after all layers have been created
2039 // only the tensors for the inputs are relevant, exclude the const tensors
2040 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2041 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2042
2043 // Register the output connection slots for the layer, connections are made after all layers have been created
2044 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2045 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2046 outputTensorIndexes[1],
2047 outputTensorIndexes[2],
2048 outputTensorIndexes[3]});
2049}
2050
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002051/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2052void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2053{
2054 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2055
2056 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2057 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2058 CHECK_VALID_SIZE(outputs.size(), 1);
2059
2060 if (inputs.size() < 1)
2061 {
2062 throw ParseException("Pack must have at least one input.");
2063 }
2064
2065 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2066 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2067
2068 StackDescriptor desc;
2069 desc.m_Axis = static_cast<uint32_t>(options->axis);
2070 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2071
2072 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2073 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2074 desc.m_InputShape = inputTensorInfo.GetShape();
2075
2076 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2077 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2078
2079 BOOST_ASSERT(layer != nullptr);
2080
2081 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2082 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2083
2084 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2085 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2086
2087 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2088 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2089}
2090
Nina Drozd200e3802019-04-15 09:47:39 +01002091void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2092{
2093 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2094
2095 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2096 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2097
2098 // This unpackAxis indicates the axis to unpack
2099 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2100
2101 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2102 CHECK_VALID_SIZE(inputs.size(), 1);
2103
2104 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002105
2106 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2107 {
2108 throw ParseException(
2109 boost::str(
2110 boost::format(
2111 "The unpack axis: %1% cannot be greater than or equal to "
2112 "the number of input dimension %2% %3%")
2113 % unpackAxis
2114 % inputTensorInfo.GetNumDimensions()
2115 % CHECK_LOCATION().AsString()));
2116 }
2117
Nina Drozd200e3802019-04-15 09:47:39 +01002118 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2119 // If num is not defined, automatically infer from the length of the dimension axis.
2120 if(unpackNum == 0)
2121 {
2122 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2123 }
2124
2125 // If unpack number cannot be inferred and is still zero, throw ParseException.
2126 if(unpackNum == 0)
2127 {
2128 throw ParseException("Number to unpack must greater than zero.");
2129 }
2130
2131 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2132 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2133
2134 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2135 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2136
2137 // Add current input shape to unpackDimSizes
2138 for (unsigned int i = 0; i < inputDimSize; ++i)
2139 {
2140 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2141 }
2142
2143 if (unpackDimSizes[unpackAxis] != unpackNum)
2144 {
2145 throw ParseException("Number to unpack must be the same as length of the dimension to "
2146 "unpack along.");
2147 }
2148
2149 unpackDimSizes[unpackAxis] /= unpackNum;
2150
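    // Unpack is implemented as a Splitter along the unpack axis; each view keeps the split
    // dimension (now of size 1), which the per-output Reshapes created below then remove,
    // e.g. unpacking a [4,3] tensor along axis 0 produces four [3] outputs.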
2151 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2152 for (unsigned int j = 0; j < unpackNum; ++j)
2153 {
2154 // Set the size of the views.
2155 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2156 {
2157 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2158 }
2159 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2160 }
2161
2162 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2163 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2164
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002165 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2166 unpackDimSizes.data());
2167
Nina Drozd200e3802019-04-15 09:47:39 +01002168 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2169 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2170
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002171 // Reshape to remove unpacked dimension
2172 unsigned int reshapedNumDimensions = inputDimSize - 1;
2173 std::vector<unsigned int> reshapedDimensions(reshapedNumDimensions);
Nina Drozd200e3802019-04-15 09:47:39 +01002174
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002175 unsigned int reshapeIndex = 0;
2176 for (unsigned int i = 0; i < inputDimSize; ++i)
Nina Drozd200e3802019-04-15 09:47:39 +01002177 {
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002178 if (i == unpackAxis)
2179 {
2180 continue;
2181 }
2182 reshapedDimensions[reshapeIndex++] = unpackDimSizes[i];
Nina Drozd200e3802019-04-15 09:47:39 +01002183 }
2184
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002185 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2186 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2187 {
2188 armnn::TensorInfo reshapedTensorInfo = inputTensorInfo;
2189 reshapedTensorInfo.SetShape(armnn::TensorShape{ reshapedNumDimensions, reshapedDimensions.data() });
2190
2191 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2192 armnn::ReshapeDescriptor desc;
2193 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2194 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
2195
2196 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape, inputTensorInfo.GetDataType()));
2197 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2198
2199 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2200
2201 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2202 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2203 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2204 }
Nina Drozd200e3802019-04-15 09:47:39 +01002205}
2206
Nina Drozd0324f482019-04-08 10:52:10 +01002207void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2208{
2209 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2210
2211 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2212 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2213
2214 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2215
Nina Drozd200e3802019-04-15 09:47:39 +01002216 // If number of splits cannot be inferred and is zero, throw ParseException.
2217 if(numSplits == 0)
2218 {
2219 throw ParseException("Number to splits must greater than zero.");
2220 }
2221
Nina Drozd0324f482019-04-08 10:52:10 +01002222 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2223 CHECK_VALID_SIZE(inputs.size(), 2);
2224 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2225 CHECK_VALID_SIZE(outputs.size(), numSplits);
2226
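    // For the TfLite SPLIT operator the first input is the (constant, scalar) axis tensor and the
    // second input is the tensor to split.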
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002227 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2228 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002229
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002230 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2231 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2232 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2233
2234 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2235 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002236
2237 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2238 if (splitDim == 0 || splitDim == 2)
2239 {
2240 throw ParseException(
2241 boost::str(
2242 boost::format(
2243 "Dimension %1% for split is not supported by Armnn. %2%")
2244 % splitDim
2245 % CHECK_LOCATION().AsString()));
2246 }
2247
2248 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002249 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002250 {
2251 throw ParseException(
2252 boost::str(
2253 boost::format(
2254 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002255 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002256 % inputTensorInfo.GetNumDimensions()
2257 % MaxNumOfTensorDimensions
2258 % CHECK_LOCATION().AsString()));
2259 }
2260
2261 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2262
2263 // Add current input shape to splitterDimSizes
2264 for (unsigned int i = 0; i < inputDimSize; ++i)
2265 {
2266 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2267 }
2268
2269 if (splitterDimSizes[splitDim] % numSplits != 0)
2270 {
2271 throw ParseException("Number of splits must evenly divide the dimension");
2272 }
2273 splitterDimSizes[splitDim] /= numSplits;
2274
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002275 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002276 for (unsigned int j = 0; j < numSplits; ++j)
2277 {
2278 // Set the size of the views.
2279 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2280 {
2281 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2282 }
2283 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2284 }
2285
2286 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2287 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2288
2289 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002290 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002291
2292 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2293 splitterDimSizes.data());
2294
2295 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2296 {
2297 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
2298 inputTensorInfo.GetDataType()));
2299 }
2300
2301 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2302 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2303}
2304
Sadik Armagan58f39192018-09-17 14:14:39 +01002305armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2306 unsigned int outputSlot,
2307 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002308{
2309 ActivationDescriptor activationDesc;
2310 std::string layerName = prevLayer->GetName();
2311
2312 switch(activationType)
2313 {
2314 case tflite::ActivationFunctionType_NONE:
2315 {
2316 // this is a no-op: return previous layer
2317 return prevLayer;
2318 }
2319 case tflite::ActivationFunctionType_RELU:
2320 {
2321 activationDesc.m_Function = ActivationFunction::ReLu;
2322 layerName += ":RELU";
2323 break;
2324 }
2325 case tflite::ActivationFunctionType_RELU6:
2326 {
2327 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2328 activationDesc.m_A = 6.0f;
2329 activationDesc.m_B = 0.0f;
2330 layerName += ":RELU6";
2331 break;
2332 }
2333 case tflite::ActivationFunctionType_TANH:
2334 {
2335 activationDesc.m_Function = ActivationFunction::TanH;
2336 activationDesc.m_A = 1.0f;
2337 activationDesc.m_B = 1.0f;
2338 layerName += ":TANH";
2339 break;
2340 }
2341
2342 // These are listed as a reminder of the other fused activation types that could be supported
2343 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2344 case tflite::ActivationFunctionType_SIGN_BIT:
2345 default:
2346 {
2347 throw ParseException(
2348 boost::str(
2349 boost::format("TfLite parser doesn't suppport fused activation: "
2350 "%1%/%2% %3% ") %
2351 activationType %
2352 tflite::EnumNameActivationFunctionType(activationType) %
2353 CHECK_LOCATION().AsString()));
2354
2355 }
2356 }
2357
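    // Chain the activation onto the requested output slot of the previous layer and return it,
    // so that subsequent output-slot registration targets the fused activation.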
2358 IConnectableLayer* activationLayer =
2359 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2360
2361 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2362 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2363 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2364 return activationLayer;
2365}
2366
2367TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2368{
2369 if (fileName == nullptr)
2370 {
2371 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2372 CHECK_LOCATION().AsString()));
2373 }
2374 boost::system::error_code errorCode;
2375 boost::filesystem::path pathToFile(fileName);
2376 if (!boost::filesystem::exists(pathToFile, errorCode))
2377 {
2378 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2379 fileName %
2380 errorCode %
2381 CHECK_LOCATION().AsString()));
2382 }
2383 std::ifstream file(fileName, std::ios::binary);
2384 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2385 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2386 fileContent.size());
2387}
2388
2389TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2390{
2391 if (binaryContent == nullptr)
2392 {
2393 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2394 CHECK_LOCATION().AsString()));
2395 }
2396 flatbuffers::Verifier verifier(binaryContent, len);
2397 if (verifier.VerifyBuffer<tflite::Model>() == false)
2398 {
2399 throw ParseException(
2400 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2401 "flatbuffers format. size:%1% %2%") %
2402 len %
2403 CHECK_LOCATION().AsString()));
2404 }
2405 return tflite::UnPackModel(binaryContent);
2406}
2407
2408TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2409 size_t subgraphIndex,
2410 size_t operatorIndex)
2411{
2412 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2413
Derek Lambertiff05cc52019-04-26 13:05:17 +01002414 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2415 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002416
2417 size_t inputCount = operatorPtr->inputs.size();
2418 TensorRawPtrVector result(inputCount);
2419 for (size_t i=0; i<inputCount; ++i)
2420 {
2421 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002422 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002423 }
2424 return result;
2425}
2426
2427TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2428 size_t subgraphIndex,
2429 size_t operatorIndex)
2430{
2431 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2432
Derek Lambertiff05cc52019-04-26 13:05:17 +01002433 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2434 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002435
2436 size_t outputCount = operatorPtr->outputs.size();
2437 TensorRawPtrVector result(outputCount);
2438 for (size_t i=0; i<outputCount; ++i)
2439 {
2440 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2441 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002442 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002443 }
2444 return result;
2445}
2446
2447TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2448 size_t subgraphIndex)
2449{
2450 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002451 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002452
Derek Lambertiff05cc52019-04-26 13:05:17 +01002453 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002454 TensorIdRawPtrVector result(inputCount);
2455 for (size_t i=0; i<inputCount; ++i)
2456 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002457 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002458 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002459 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002460 }
2461 return result;
2462}
2463
2464TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2465 size_t subgraphIndex)
2466{
2467 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002468 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002469
Derek Lambertiff05cc52019-04-26 13:05:17 +01002470 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002471 TensorIdRawPtrVector result(outputCount);
2472 for (size_t i=0; i<outputCount; ++i)
2473 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002474 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2475 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002476 }
2477 return result;
2478}
2479
std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
                                                      size_t subgraphIndex,
                                                      size_t operatorIndex)
{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->inputs;
}

std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
                                                       size_t subgraphIndex,
                                                       size_t operatorIndex)
{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->outputs;
}

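// Illustrative usage only: the raw (signed) tensor ids returned above are typically
// converted to unsigned indices before being handed to RegisterInputSlots /
// RegisterOutputSlots, in the style of the operator handlers in this file.
//
//     auto inputTensorIndexes  = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
//     auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
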
void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
                                      size_t operatorIndex,
                                      IConnectableLayer* layer,
                                      const std::vector<unsigned int>& tensorIndexes)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);
    if (tensorIndexes.size() != layer->GetNumInputSlots())
    {
        throw ParseException(
            boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
                                     " for subgraph:%3% operator index:%4% %5%") %
                       tensorIndexes.size() %
                       layer->GetNumInputSlots() %
                       subgraphIndex %
                       operatorIndex %
                       CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
    {
        unsigned int tensorIndex = tensorIndexes[slotIndex];
        armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
        RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}

void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
                                       size_t operatorIndex,
                                       IConnectableLayer* layer,
                                       const std::vector<unsigned int>& tensorIndexes)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);
    if (tensorIndexes.size() != layer->GetNumOutputSlots())
    {
        throw ParseException(
            boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
                                     " for subgraph:%3% operator index:%4% %5%") %
                       tensorIndexes.size() %
                       layer->GetNumOutputSlots() %
                       subgraphIndex %
                       operatorIndex %
                       CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
    {
        unsigned int tensorIndex = tensorIndexes[slotIndex];
        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
        RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}

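// Illustrative usage only (hypothetical single-input, single-output layer): once an
// operator handler has created its Arm NN layer, it wires the layer into the graph by
// recording which subgraph tensors feed its input slots and which tensors it produces.
//
//     RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
//     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
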
void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
    for (auto const & tensorIdAndPtr : inputs)
    {
        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
        IConnectableLayer* layer =
            m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());

        auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
        layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            layer,
                            { static_cast<uint32_t>(tensorIdAndPtr.first) });
    }
}

void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
    for (auto const & tensorIdAndPtr : outputs)
    {
        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
        IConnectableLayer* layer =
            m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());

        RegisterInputSlots(subgraphIndex,
                           VIRTUAL_OPERATOR_ID,
                           layer,
                           { static_cast<uint32_t>(tensorIdAndPtr.first) });
    }
}

void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];

    // A tensor of this subgraph that has consumers but no registered producer must be
    // a constant held in the model's buffers: materialise it as a ConstantLayer. Only
    // the connections of the subgraph given by subgraphIndex are considered, so the
    // tensor indices always refer to subgraphPtr->tensors.
    for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
    {
        if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
            m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
        {
            TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
            armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
            auto tensorAndData = CreateConstTensor(tensorPtr,
                                                   tensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());

            std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
            IConnectableLayer* layer =
                m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

            layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
            RegisterOutputSlots(subgraphIndex,
                                VIRTUAL_OPERATOR_ID,
                                layer,
                                { tensorIndex });
        }
    }
}

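// Illustrative call order only (a sketch of how these Setup* helpers are intended to be
// driven once the operators of a subgraph have been parsed; the actual driver lives in
// the network-creation code earlier in this file):
//
//     for (size_t subgraphIndex = 0; subgraphIndex < m_Model->subgraphs.size(); ++subgraphIndex)
//     {
//         // ... parse every operator of the subgraph ...
//         SetupInputLayers(subgraphIndex);
//         SetupOutputLayers(subgraphIndex);
//         SetupConstantLayers(subgraphIndex);
//     }
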
// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
{
    CHECK_BUFFER(model, bufferIndex);
    return model->buffers[bufferIndex].get();
}

template<typename T>
std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
                                            TfLiteParser::TensorRawPtr tensorPtr,
                                            armnn::TensorInfo& tensorInfo,
                                            armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    auto constData = CreateConstTensorImpl<T>(bufferPtr,
                                              tensorPtr,
                                              tensorInfo,
                                              permutationVector);
    TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
    return std::make_pair(constData.first, std::move(storage));
}

std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
                                armnn::TensorInfo& tensorInfo,
                                armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    CHECK_TENSOR_PTR(tensorPtr);
    auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
    CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);

    switch (tensorInfo.GetDataType())
    {
        case armnn::DataType::Float32:
            return CreateConstTensorAndStoreData<float>(bufferPtr,
                                                        tensorPtr,
                                                        tensorInfo,
                                                        permutationVector);
        case armnn::DataType::QuantisedAsymm8:
            return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        case armnn::DataType::Signed32:
            return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        default:
        {
            std::stringstream errString;
            errString << "Unexpected datatype when creating const tensor: "
                      << armnn::GetDataTypeName(tensorInfo.GetDataType())
                      << " shape:" << tensorInfo.GetShape()
                      << CHECK_LOCATION().AsString();
            throw ParseException(errString.str());
        }
    }
}

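// Illustrative usage only (hypothetical weight tensor): CreateConstTensor returns both
// the armnn::ConstTensor and the storage that owns the (optionally permuted) copy of
// the buffer data, so the pair should be kept alive until the tensor has been handed
// to the network.
//
//     armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
//     auto filterTensorAndData = CreateConstTensor(inputs[1],
//                                                  filterTensorInfo,
//                                                  armnn::Optional<armnn::PermutationVector&>());
//     // filterTensorAndData.first  -> armnn::ConstTensor wrapping the copied data
//     // filterTensorAndData.second -> SupportedDataStorage keeping that data alive
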
BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                          const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    for (auto const & input : inputs)
    {
        if (input.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
            return std::make_pair(bindingId, ToTensorInfo(input.second));
        }
    }

    std::stringstream bindings;
    for (auto const & input : inputs)
    {
        bindings << "'" << input.second->name << "' ";
    }

    throw ParseException(
        boost::str(
            boost::format("No input binding found for subgraph:%1% and name:%2%. "
                          "Possible inputs are: [%3%] %4%") %
            subgraphId %
            name %
            bindings.str() %
            CHECK_LOCATION().AsString()));
}

BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        auto const output = outputs[i];
        if (output.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
            std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
                                              m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
            return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
        }
    }

    std::stringstream bindings;
    for (auto const & output : outputs)
    {
        bindings << "'" << output.second->name << "' ";
    }

    throw ParseException(
        boost::str(
            boost::format("No output binding found for subgraph:%1% and name:%2%. "
                          "Possible outputs are: [%3%] %4%") %
            subgraphId %
            name %
            bindings.str() %
            CHECK_LOCATION().AsString()));
}

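// Illustrative usage only ("parser" and the tensor names below are hypothetical): the
// binding info pairs a layer binding id with the tensor info of the named subgraph
// input/output, which is what callers need when binding their buffers at inference time.
//
//     BindingPointInfo inputBinding  = parser->GetNetworkInputBindingInfo(0, "input_tensor");
//     BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(0, "output_tensor");
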
size_t TfLiteParser::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}

std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(inputs.size());
    for (auto const & input : inputs)
    {
        result.push_back(input.second->name);
    }
    return result;
}

std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(outputs.size());
    for (auto const & output : outputs)
    {
        result.push_back(output.second->name);
    }
    return result;
}

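// Illustrative usage only ("parser" is a hypothetical ITfLiteParser instance): these
// queries let callers discover the bindable tensor names without inspecting the
// flatbuffer model directly.
//
//     for (size_t i = 0; i < parser->GetSubgraphCount(); ++i)
//     {
//         std::vector<std::string> inputNames  = parser->GetSubgraphInputTensorNames(i);
//         std::vector<std::string> outputNames = parser->GetSubgraphOutputTensorNames(i);
//     }
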
ITfLiteParser* ITfLiteParser::CreateRaw()
{
    return new TfLiteParser();
}

ITfLiteParserPtr ITfLiteParser::Create()
{
    return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
}

void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}

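// Illustrative usage only (the model path is hypothetical): Create() returns a smart
// pointer whose deleter is ITfLiteParser::Destroy, so the parser is released
// automatically; CreateRaw()/Destroy() are the matching manual-lifetime variants.
//
//     ITfLiteParserPtr parser = ITfLiteParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
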
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int32Data(nullptr)
{
}

TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int32Data(nullptr)
{
}

TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int32Data(std::move(data))
{
}

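// Note: each constructor above takes ownership of exactly one typed buffer and leaves
// the other two members null. The ConstTensor produced alongside a SupportedDataStorage
// refers to this buffer, so the storage must outlive any use of that ConstTensor.
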
} // armnnTfLiteParser