blob: 04fa6b194786f5b8d3d43bffc02fe4ff3a85758d [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5#include "TfLiteParser.hpp"
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Exceptions.hpp>
9#include <armnn/TypesUtils.hpp>
10#include <boost/filesystem.hpp>
11
12// armnnUtils:
Sadik Armagan479045b2018-10-01 11:51:37 +010013#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010014#include <Permute.hpp>
15#include <VerificationHelpers.hpp>
16
17// The generated code based on the Tf Lite schema:
18#include <schema_generated.h>
19
20#include <boost/core/ignore_unused.hpp>
21#include <boost/assert.hpp>
22#include <boost/format.hpp>
23#include <boost/log/trivial.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010024#include <boost/format.hpp>
25#include <boost/numeric/conversion/cast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026
27#include <fstream>
28#include <algorithm>
29#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010030#include <numeric>
keidav011b3e2ea2019-02-21 10:07:37 +000031#include <flatbuffers/flexbuffers.h>
telsoa01c577f2c2018-08-31 09:22:23 +010032
33using namespace armnn;
34using armnn::CheckLocation;
35namespace armnnTfLiteParser
36{
37namespace
38{
jimfly01c25411c2018-11-14 17:47:22 +000039
// Sentinel operator index meaning "not tied to a concrete operator";
// CheckModel() exempts this value from its operator-range check.
const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
41
42void CheckSubgraph(const TfLiteParser::ModelPtr & model,
43 size_t subgraphIndex,
44 const CheckLocation & location)
45{
46 if (model.get() == nullptr)
47 {
48 throw ParseException(
49 boost::str(
50 boost::format("%1% was called with invalid (null) model. "
51 "Possible reason is that the model is not yet loaded and Unpack(ed). "
52 "subgraph:%2% at %3%") %
53 location.m_Function %
54 subgraphIndex %
55 location.FileLine()));
56 }
57 else if (subgraphIndex >= model->subgraphs.size())
58 {
59 throw ParseException(
60 boost::str(
61 boost::format("%1% was called with an invalid subgraph index. "
62 "subgraph:%2% at %3%") %
63 location.m_Function %
64 subgraphIndex %
65 location.FileLine()));
66 }
67}
68
// Convenience wrapper: validates the subgraph index, capturing the caller's
// source location for the exception message.
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
71
72void CheckModel(const TfLiteParser::ModelPtr & model,
73 size_t subgraphIndex,
74 size_t operatorIndex,
75 const CheckLocation & location)
76{
77 if (model.get() == nullptr)
78 {
79 throw ParseException(
80 boost::str(
81 boost::format("%1% was called with invalid (null) model. "
82 "Possible reason is that the model is not yet loaded and Unpack(ed). "
83 "subgraph:%2% operator:%3% at %4%") %
84 location.m_Function %
85 subgraphIndex %
86 operatorIndex %
87 location.FileLine()));
88 }
89 else if (subgraphIndex >= model->subgraphs.size())
90 {
91 throw ParseException(
92 boost::str(
93 boost::format("%1% was called with an invalid subgraph index. "
94 "subgraph:%2% operator:%3% at %4%") %
95 location.m_Function %
96 subgraphIndex %
97 operatorIndex %
98 location.FileLine()));
99 }
100 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
101 operatorIndex != VIRTUAL_OPERATOR_ID)
102 {
103 throw ParseException(
104 boost::str(
105 boost::format("%1% was called with an invalid operator index. "
106 "subgraph:%2% operator:%3% at %4%") %
107 location.m_Function %
108 subgraphIndex %
109 operatorIndex %
110 location.FileLine()));
111 }
112}
113
// Convenience wrapper: validates model/subgraph/operator indices, capturing
// the caller's source location for the exception message.
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
116
117void CheckTensor(const TfLiteParser::ModelPtr & model,
118 size_t subgraphIndex,
119 size_t tensorIndex,
120 const CheckLocation & location)
121{
122 // not checking model, because I assume CHECK_MODEL already run
123 // and checked that. An assert would do.
124 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
125
126 // also subgraph index should be checked by CHECK_MODEL so
127 // I only add an assert here
128 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
129
130 // the tensor index is the only one to check here
131 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
132 {
133 throw ParseException(
134 boost::str(
135 boost::format("%1% was called with an invalid tensor index. "
136 "subgraph:%2% tensor:%3% at %4%") %
137 location.m_Function %
138 subgraphIndex %
139 tensorIndex %
140 location.FileLine()));
141 }
142}
143
// Convenience wrapper: validates a tensor index within a subgraph, capturing
// the caller's source location for the exception message.
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
146
147void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
148 const CheckLocation & location)
149{
150 if (rawPtr == nullptr)
151 {
152 throw ParseException(
153 boost::str(
154 boost::format("%1% was called with a null tensor pointer. "
155 "at %2%") %
156 location.m_Function %
157 location.FileLine()));
158
159 }
160}
161
// Convenience wrapper: null-checks a raw tensor pointer, capturing the
// caller's source location for the exception message.
#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
164
165void CheckBuffer(const TfLiteParser::ModelPtr & model,
166 size_t bufferIndex,
167 const CheckLocation & location)
168{
169 if (model.get() == nullptr)
170 {
171 throw ParseException(
172 boost::str(
173 boost::format("%1% was called with invalid (null) model. "
174 "Possible reason is that the model is not yet loaded and Unpack(ed). "
175 "buffer:%2% at %3%") %
176 location.m_Function %
177 bufferIndex %
178 location.FileLine()));
179 }
180 else if (bufferIndex >= model->buffers.size())
181 {
182 throw ParseException(
183 boost::str(
184 boost::format("%1% was called with an invalid buffer index. "
185 "buffer index:%2% at %3%") %
186 location.m_Function %
187 bufferIndex %
188 location.FileLine()));
189 }
190 else if (model->buffers[bufferIndex].get() == nullptr)
191 {
192 throw ParseException(
193 boost::str(
194 boost::format("The buffer #%1% is null. %3%") %
195 bufferIndex %
196 location.AsString()));
197 }
198}
199
// Convenience wrapper: validates a model buffer index, capturing the
// caller's source location for the exception message.
#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
202
203void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
204 const armnn::TensorInfo & tensorInfo,
205 uint32_t bufferId,
206 const CheckLocation & location)
207{
208 if (bufferPtr == nullptr)
209 {
210 throw ParseException(
211 boost::str(
212 boost::format("BufferPtr is null for buffer:%1%. %2%") %
213 bufferId %
214 location.AsString()));
215 }
216 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
217 tensorInfo.GetNumBytes() > bufferPtr->data.size())
218 {
219 std::stringstream ss;
220 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
221 << "For tensor: " << tensorInfo.GetShape()
222 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
223 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
224 throw ParseException(ss.str());
225 }
226}
227
// Convenience wrapper: validates a buffer's payload against a tensor's size,
// capturing the caller's source location for the exception message.
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
230
231bool IsActivationSupported(tflite::ActivationFunctionType activationType)
232{
233 switch(activationType)
234 {
235 case tflite::ActivationFunctionType_NONE:
236 case tflite::ActivationFunctionType_RELU:
237 case tflite::ActivationFunctionType_RELU6:
238 case tflite::ActivationFunctionType_TANH:
239 {
240 return true;
241 }
242 default:
243 {
244 return false;
245 }
246 }
247}
248
// Throws ParseException when OPTION carries a fused activation the parser
// cannot expand (see IsActivationSupported). Fix: error message typo
// "suppport" -> "support".
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
266
267
268std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
269{
270 std::vector<unsigned int> result;
271 result.reserve(in.size());
272 for (auto & i : in)
273 {
274 result.push_back(CHECKED_NON_NEGATIVE(i));
275 }
276 return result;
277}
278
279void CalcPadding(uint32_t inputSize,
280 uint32_t filterSize,
281 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100282 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100283 uint32_t& paddingFront,
284 uint32_t& paddingBack,
285 tflite::Padding padding)
286{
287 paddingFront = 0;
288 paddingBack = 0;
289 if (padding == tflite::Padding_SAME)
290 {
291 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100292 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
293 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100294 if (temp > inputSize)
295 {
296 paddingFront = (temp - inputSize) / 2;
297 paddingBack = (temp - inputSize) - paddingFront;
298 }
299 }
300}
301
// Builds an armnn::TensorInfo from a TfLite tensor, using the caller-supplied
// shape. Supports UINT8 (quantized asymm8), FLOAT32 and INT32 tensors only;
// anything else throws ParseException. A scalar (empty shape) is promoted to
// shape {1}.
armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                    tensorPtr->type %
                    tflite::EnumNameTensorType(tensorPtr->type) %
                    tensorPtr->name %
                    location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        // Only per-tensor quantization is accepted: scale / zero_point lists
        // must be empty or hold exactly one entry.
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32
            // but this is what we support at the moment in ArmNN
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    // An empty shape denotes a scalar; represent it as a 1-element tensor.
    std::vector<unsigned int> safeShape = shapes;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
    }

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(safeShape.size()),
                             safeShape.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}
366
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000367armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
368{
369 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
370 return ToTensorInfo(tensorPtr, dimensions);
371}
372
// Copies a TfLite buffer into freshly allocated storage and wraps it in an
// armnn::ConstTensor. If a non-empty permutation vector is supplied, both
// tensorInfo (in/out parameter) and the data are permuted accordingly;
// otherwise the bytes are copied verbatim. The caller owns the returned
// unique_ptr, which must outlive the ConstTensor that points into it.
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        // Permute the shape first, then shuffle the raw elements to match.
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
400
telsoa01c577f2c2018-08-31 09:22:23 +0100401armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
402{
403 // generate the binding id by shifting the tensor id by 8 bit
404 // and add the subgraph id, which allows 256 subgraphs
405 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
406}
407
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000408bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
409{
410 const unsigned int actualSize = actual.GetNumDimensions();
411 if (actualSize != expected.size())
412 {
413 return false;
414 }
415
416 for (unsigned int i = 0u; i < actualSize; i++)
417 {
418 if (expected[i] < 0 ||
419 actual[i] != static_cast<unsigned int>(expected[i]))
420 {
421 return false;
422 }
423 }
424
425 return true;
426}
427
telsoa01c577f2c2018-08-31 09:22:23 +0100428} // <anonymous>
429
// Constructor: every builtin opcode initially maps to ParseUnsupportedOperator
// (which throws a descriptive ParseException); the supported subset is then
// registered explicitly below.
TfLiteParser::TfLiteParser()
: m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    // CUSTOM is routed straight to DetectionPostProcess; other custom ops are
    // not distinguished here.
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
}
465
466void TfLiteParser::ResetParser()
467{
468 m_Network = armnn::INetworkPtr(nullptr, nullptr);
469 m_Model = nullptr;
470 m_SubgraphConnections.clear();
471}
472
// Inserts a Reshape layer in front of a binary (two-input) layer so that the
// lower-rank input is expanded (with leading 1s) to the rank of the other
// input, enabling broadcasting. The reshape feeds the layer's input slot 0;
// the higher-rank input is registered directly on slot 1.
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    // Initially assume input 0 is the one to reshape and input 1 is the
    // reference; swapped below if input 1 turns out to have the lower rank.
    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        // Input 1 has the lower rank: swap roles so the reshape is applied
        // to it instead.
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    // Right-align the existing dimensions into a vector of 1s, e.g.
    // [H, W] with target rank 4 becomes [1, 1, H, W].
    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
529
telsoa01c577f2c2018-08-31 09:22:23 +0100530INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
531{
532 ResetParser();
533 m_Model = LoadModelFromFile(graphFile);
534 return CreateNetworkFromModel();
535}
536
537INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
538{
539 ResetParser();
540 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
541 return CreateNetworkFromModel();
542}
543
// Walks the (single supported) subgraph of m_Model, dispatching each operator
// to its registered parser function. Parse errors are collected per-operator
// and rethrown as one aggregate ParseException at the end, so as many errors
// as possible are reported in one pass. Finally, all recorded producer ->
// consumer slot connections are wired up.
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubgraphPtr const & subgraph : m_Model->subgraphs)
    {
        // One connection record per tensor in this subgraph.
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                            builtinCode %
                            tflite::BuiltinOperator_MAX %
                            subgraphIndex %
                            operatorIndex %
                            CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                // Record the failure but keep parsing so all broken
                // operators are reported together.
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}
639
640void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
641 size_t tensorIndex,
642 armnn::IOutputSlot* slot)
643{
644 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
645 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
646 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
647
648 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
649
650 // assuming there is only one producer for that tensor
651 if (tensorSlots.outputSlot != nullptr)
652 {
653 throw ParseException(boost::str(
654 boost::format("Another layer has already registered itself as the producer of "
655 "subgraph:%1% tensor:%2% %3%") %
656 subgraphIndex %
657 tensorIndex %
658 CHECK_LOCATION().AsString()));
659 }
660
661 tensorSlots.outputSlot = slot;
662}
663
664void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
665 size_t tensorIndex,
666 armnn::IInputSlot* slot)
667{
668 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
669 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
670 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
671
672 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
673 tensorSlots.inputSlots.push_back(slot);
674}
675
676void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
677{
678 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
679 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
680 //
681 auto opcodeIndex = operatorPtr->opcode_index;
682 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
683
684 throw ParseException(
685 boost::str(
686 boost::format("Operator not supported. "
687 "subgraph:%1% operator:%2% "
688 "opcode_index:%3% opcode:%4% / %5% %6%") %
689 subgraphIndex %
690 operatorIndex %
691 opcodeIndex %
692 opcode %
693 tflite::EnumNameBuiltinOperator(opcode) %
694 CHECK_LOCATION().AsString()));
695}
696
// Parses a CONV_2D operator into an ArmNN Convolution2d layer (NHWC).
// Inputs: [input, filter] or [input, filter, bias]; one output. SAME/VALID
// padding is computed from the input/filter extents, stride and dilation.
// A supported fused activation, if present, is appended as a separate
// activation layer.
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // Filter weights are used as-is (no permutation for regular conv).
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        // Optional third input supplies the bias.
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // If a fused activation is present, the output is taken from the
    // appended activation layer instead of the convolution itself.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
779
// Handler for the tflite DEPTHWISE_CONV_2D operator.
// Reads the DepthwiseConv2DOptions, reshapes/permutes the tflite filter
// constant into the layout ArmNN expects, computes SAME/VALID padding, adds a
// DepthwiseConvolution2d layer (with optional bias and fused activation) and
// registers its input/output slots.
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;   // enabled below only when a third (bias) input is present
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // ACL only supports a depth (channel) multiplier of {1,2,3}, it is not currently stored in the descriptor
    CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1,2,3 );

    // inputs: [0] data, [1] filter, optional [2] bias
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    // (the multiplier M is recovered as (I * M) / I from the two shapes)
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        // The bias is stored unpermuted, hence the empty Optional permutation vector.
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // A fused activation (if any) becomes the layer whose output slot is registered below.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
867
// Handler for the tflite AVERAGE_POOL_2D operator; delegates to the shared
// pooling parser with the Average algorithm selected.
void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
872
Bruno Goncalvesdb947e22019-02-08 18:52:21 -0200873void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
874{
875 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
876
877 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
878 CHECK_VALID_SIZE(inputs.size(), 3);
879
880 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
881 CHECK_VALID_SIZE(outputs.size(), 1);
882
883 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
884 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
885
886 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
887 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
888
889 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
890 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
891
892 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
893 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
894
895 size_t step = 2;
896 std::vector<std::pair<unsigned int, unsigned int>> crops;
897 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
898 {
899 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
900 }
901
902 armnn::BatchToSpaceNdDescriptor desc;
903 desc.m_BlockShape = blockShape;
904 desc.m_Crops = crops;
905 desc.m_DataLayout = armnn::DataLayout::NHWC;
906
907 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
908
909 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
910 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
911
912 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
913
914 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
915 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
916
917 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
918 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
919}
920
Matthew Jackson28c94572019-07-18 10:47:03 +0100921void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
922{
923 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
924
925 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
926 CHECK_VALID_SIZE(inputs.size(), 1);
927
928 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
929 CHECK_VALID_SIZE(outputs.size(), 1);
930
931 L2NormalizationDescriptor desc;
932 desc.m_DataLayout = armnn::DataLayout::NHWC;
933 auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
934 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
935
936 BOOST_ASSERT(layer != nullptr);
937
938 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
939 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
940
941 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
942 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
943
944 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
945 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
946}
947
// Handler for the tflite MAX_POOL_2D operator; delegates to the shared
// pooling parser with the Max algorithm selected.
void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
952
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -0200953void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
954{
955 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
956
957 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
958 CHECK_VALID_SIZE(inputs.size(), 2);
959
960 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
961 CHECK_VALID_SIZE(outputs.size(), 1);
962
963 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
964 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
965
966 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
967 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
968
969 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
970 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
971
972 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
973 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
974 {
975 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
976 }
977 else
978 {
979 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
980 }
981
982 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
983 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
984}
985
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -0200986void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
987{
988 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
989
990 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
991 CHECK_VALID_SIZE(inputs.size(), 2);
992
993 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
994 CHECK_VALID_SIZE(outputs.size(), 1);
995
996 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
997 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
998
999 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1000 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1001
1002 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1003 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1004
1005 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1006 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1007 {
1008 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1009 }
1010 else
1011 {
1012 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1013 }
1014
1015 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1016 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1017}
1018
// Shared handler for the tflite AVERAGE_POOL_2D and MAX_POOL_2D operators.
// Builds a Pooling2dDescriptor from the operator's Pool2DOptions, computes
// SAME/VALID padding, adds a Pooling2d layer (plus an optional fused
// activation) and registers its input/output slots.
void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    // The layer name encodes which pooling variant this operator is.
    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // Pooling has no dilation, hence the 1u dilation factor in both calls.
    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // A fused activation (if any) becomes the layer whose output slot is registered below.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1090
telsoa01c577f2c2018-08-31 09:22:23 +01001091void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1092{
1093 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1094 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1095 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1096
1097 SoftmaxDescriptor desc;
1098 desc.m_Beta = options->beta;
1099
1100 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1101 CHECK_VALID_SIZE(inputs.size(), 1);
1102 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1103 CHECK_VALID_SIZE(outputs.size(), 1);
1104
1105 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1106 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1107
1108 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1109 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1110
1111 // register the input connection slots for the layer, connections are made after all layers have been created
1112 // only the tensors for the inputs are relevant, exclude the const tensors
1113 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1114 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1115
1116 // register the output connection slots for the layer, connections are made after all layers have been created
1117 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1118 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1119}
1120
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001121void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1122{
1123 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1124
1125 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1126 CHECK_VALID_SIZE(inputs.size(), 3);
1127
1128 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1129 CHECK_VALID_SIZE(outputs.size(), 1);
1130
1131 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1132 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1133
1134 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1135 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1136
1137 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1138 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1139
1140 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1141 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1142
1143 size_t step = 2;
1144 std::vector<std::pair<unsigned int, unsigned int>> padList;
1145 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1146 {
1147 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1148 }
1149
1150 armnn::SpaceToBatchNdDescriptor desc;
1151 desc.m_BlockShape = blockShape;
1152 desc.m_PadList = padList;
1153 desc.m_DataLayout = armnn::DataLayout::NHWC;
1154
1155 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1156
1157 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1158 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1159
1160 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1161
1162 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1163 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1164
1165 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1166 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1167}
1168
telsoa01c577f2c2018-08-31 09:22:23 +01001169armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1170 const armnn::TensorInfo & inputTensorInfo)
1171{
1172 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1173 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1174 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1175
1176 if (inputTensorInfo.GetNumDimensions() > 4)
1177 {
1178 std::stringstream ss;
1179 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1180 << " shape:" << inputTensorInfo.GetShape() << " "
1181 << CHECK_LOCATION().AsString();
1182 throw ParseException(ss.str());
1183 }
1184
1185 if (squeezeDims.empty())
1186 {
1187 squeezeDims.assign(dimensionSequence,
1188 dimensionSequence+inputTensorInfo.GetNumDimensions());
1189 }
1190
1191 std::vector<uint32_t> outputDims;
1192 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1193 {
1194 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1195 auto currentDimension = inputTensorInfo.GetShape()[i];
1196 if (skipSqueeze || currentDimension != 1)
1197 {
1198 outputDims.push_back(currentDimension);
1199 }
1200 }
1201
1202 if (outputDims.size() > 4)
1203 {
1204 std::stringstream ss;
1205 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1206 << " shape:" << inputTensorInfo.GetShape() << " "
1207 << CHECK_LOCATION().AsString();
1208 throw ParseException(ss.str());
1209 }
1210
1211 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1212 outputDims.data());
1213
1214 // we need to preserve the tensor type and the quantization data as well
1215 TensorInfo outTensorInfo = inputTensorInfo;
1216 outTensorInfo.SetShape(outShape);
1217
1218 return outTensorInfo;
1219}
1220
1221void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1222{
1223 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1224
1225 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1226 CHECK_VALID_SIZE(inputs.size(), 1);
1227
1228 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1229 CHECK_VALID_SIZE(outputs.size(), 1);
1230
1231 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1232 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1233
1234 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1235 armnn::TensorInfo outputTensorInfo =
1236 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1237 inputTensorInfo);
1238
1239 ReshapeDescriptor reshapeDesc;
1240 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1241
1242 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1243 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1244 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1245
1246 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1247 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1248
1249 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1250 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1251}
1252
// Handler for the tflite STRIDED_SLICE operator.
// inputs: [0] data, [1] const begin indices, [2] const end indices,
//         [3] const strides. The mask fields are taken from the options.
void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();

    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Copy the begin / end / stride index vectors out of their constant buffers.
    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Slot registration only; connections are made once every layer exists.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1308
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001309void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1310{
1311 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1312
1313 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1314 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1315
1316 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1317 CHECK_VALID_SIZE(inputs.size(), 2);
1318
1319 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1320 CHECK_VALID_SIZE(outputs.size(), 1);
1321
1322 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1323 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1324
1325 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1326 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1327
1328 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1329 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1330
1331 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1332 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1333 {
1334 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1335 }
1336 else
1337 {
1338 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1339 }
1340
1341 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1342
1343 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1344 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1345}
1346
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001347void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1348{
1349 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1350
1351 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1352 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1353
1354 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1355 CHECK_VALID_SIZE(inputs.size(), 2);
1356
1357 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1358 CHECK_VALID_SIZE(outputs.size(), 1);
1359
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001360 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1361 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1362
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001363 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1364 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1365
1366 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1367 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1368
1369 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001370 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1371 {
1372 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1373 }
1374 else
1375 {
1376 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1377 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001378
1379 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1380
1381 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1382 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1383}
1384
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001385void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1386{
1387 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1388
1389 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1390 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1391
1392 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1393 CHECK_VALID_SIZE(inputs.size(), 2);
1394
1395 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1396 CHECK_VALID_SIZE(outputs.size(), 1);
1397
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001398 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1399 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1400
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001401 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1402 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1403
1404 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1405 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1406
1407 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001408 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1409 {
1410 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1411 }
1412 else
1413 {
1414 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1415 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001416
1417 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1418
1419 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1420 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1421}
1422
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001423void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1424{
1425 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1426
1427 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1428
1429 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1430 CHECK_VALID_SIZE(outputs.size(), 1);
1431
1432 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1433 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1434
1435 armnn::MeanDescriptor desc;
1436 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1437 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1438 desc.m_Axis = axis;
1439
1440 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1441 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1442
1443 desc.m_KeepDims =
1444 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1445 true : false;
1446
1447 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1448 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1449
1450 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1451
1452 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1453 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1454
1455 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1456 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1457}
1458
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001459void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1460{
1461 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1462
1463 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1464
1465 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1466 CHECK_VALID_SIZE(outputs.size(), 1);
1467
1468 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1469 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1470
1471 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1472 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1473
1474 size_t step = 2;
1475 armnn::PadDescriptor desc;
1476 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1477 {
1478 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1479 }
1480
1481 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1482 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1483
1484 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1485 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1486
1487 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1488 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1489
1490 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1491 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1492}
1493
Finn Williamsc42c3842019-01-22 14:18:11 +00001494
/// Parses a TfLite RELU operator by delegating to the shared activation handler.
void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
}
1499
/// Parses a TfLite RELU6 operator; BoundedReLu with the 0..6 clamp is set up
/// inside ParseActivation.
void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
}
Sadik Armagan58f39192018-09-17 14:14:39 +01001504
/// Parses a TfLite LOGISTIC operator by delegating to the shared activation
/// handler with the Sigmoid function.
void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
}
1509
/// Parses a TfLite TANH operator by delegating to the shared activation handler.
void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
}
1514
Finn Williamsc42c3842019-01-22 14:18:11 +00001515
1516void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1517{
1518 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001519 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1520 boost::ignore_unused(operatorPtr);
1521
1522 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1523 CHECK_VALID_SIZE(inputs.size(), 1);
1524
1525 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1526 CHECK_VALID_SIZE(outputs.size(), 1);
1527
Finn Williamsc42c3842019-01-22 14:18:11 +00001528 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001529 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001530 activationDesc.m_Function = activationType;
1531
1532 switch (activationType)
1533 {
1534 case ActivationFunction::ReLu:
1535 {
1536 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1537 break;
1538 }
1539 case ActivationFunction::BoundedReLu:
1540 {
1541 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1542 activationDesc.m_A = 6.0f;
1543 activationDesc.m_B = 0.0f;
1544 break;
1545 }
1546 case ActivationFunction::Sigmoid:
1547 {
1548 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1549 break;
1550 }
Nina Drozd99851762019-04-09 09:37:38 +01001551 case ActivationFunction::TanH:
1552 {
1553 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1554 activationDesc.m_A = 1.0f;
1555 activationDesc.m_B = 1.0f;
1556 break;
1557 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001558 default:
1559 {
1560 throw ParseException(
1561 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1562 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1563 }
1564 }
1565
1566 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001567
1568 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1569 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1570
1571 // register the input connection slots for the layer, connections are made after all layers have been created
1572 // only the tensors for the inputs are relevant, exclude the const tensors
1573 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1574 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1575
1576 // register the output connection slots for the layer, connections are made after all layers have been created
1577 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1578 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1579}
Sadikb94967b2018-09-19 15:30:00 +01001580armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1581 const std::vector<int32_t> & targetDimsIn)
1582{
1583 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1584 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1585
1586 if (stretchDim != targetDimsIn.end())
1587 {
1588 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1589 {
1590 throw ParseException(
1591 boost::str(
1592 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1593 }
1594
1595 auto targetNumElements =
1596 boost::numeric_cast<unsigned int>(
1597 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1598
1599 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1600 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1601 }
1602
1603 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1604
1605 TensorInfo reshapeInfo = inputTensorInfo;
1606 reshapeInfo.SetShape(outputShape);
1607
1608 return reshapeInfo;
1609}
1610
1611void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1612{
1613 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1614
1615 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001616
1617 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1618 CHECK_VALID_SIZE(outputs.size(), 1);
1619
1620 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1621 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1622
1623 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001624 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1625 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001626 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1627
kevmay0171972a82018-12-17 14:28:03 +00001628 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001629 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1630 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001631 {
1632 std::stringstream ss;
1633 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001634 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001635 << " does not equal output shape "
1636 << actualOutputTensorInfo.GetShape()
1637 << ": "
1638 << CHECK_LOCATION().AsString();
1639 throw ParseException(ss.str());
1640 }
1641
Sadikb94967b2018-09-19 15:30:00 +01001642 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001643 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001644
1645 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1646 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001647 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001648
1649 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1650 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1651
1652 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1653 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1654}
1655
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001656void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1657{
1658 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1659
1660 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1661 CHECK_VALID_SIZE(inputs.size(), 2);
1662
1663 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1664 CHECK_VALID_SIZE(outputs.size(), 1);
1665
1666 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1667
1668 // Data for the parsed tensor args (size) must be stored locally.
1669 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1670
1671 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1672 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1673
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001674 ResizeDescriptor desc;
1675 desc.m_Method = armnn::ResizeMethod::Bilinear;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001676 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001677 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1678 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001679
1680 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001681 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001682
1683 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1684 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1685
1686 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1687 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1688
1689 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1690 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1691}
1692
Sadik Armagan479045b2018-10-01 11:51:37 +01001693void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1694{
1695 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1696
1697 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1698 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1699
1700 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1701
1702 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1703 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1704 CHECK_VALID_SIZE(outputs.size(), 1);
1705
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001706 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1707 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001708
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001709 const unsigned int concatDimInput = static_cast<unsigned int>(
1710 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01001711
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001712 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1713 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001714
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001715 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001716
1717 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1718 {
1719 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1720
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001721 // This set up concatDescriptor view origin
1722 armnnUtils::ProcessConcatInputTensorInfo(
1723 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001724 }
1725
1726 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01001727 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01001728
1729 BOOST_ASSERT(layer != nullptr);
1730
1731 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1732 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001733
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001734 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001735
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001736 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001737
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001738 // add fused activation layer
1739 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001740
Sadik Armagan479045b2018-10-01 11:51:37 +01001741 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1742 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1743}
1744
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001745void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1746{
1747 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1748
1749 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1750 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1751
1752 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1753
1754 FullyConnectedDescriptor desc;
1755 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001756 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001757
1758 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1759 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1760 CHECK_VALID_SIZE(outputs.size(), 1);
1761
1762 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1763
1764 // Fully Connected Layer accepts two dimensional weights input
1765 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1766 if (weightsDimension != 2)
1767 {
1768 throw ParseException(
1769 boost::str(
1770 boost::format(
1771 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1772 "Node %2%")
1773 % weightsDimension
1774 % CHECK_LOCATION().AsString()));
1775 }
1776
Matteo Martincigh747ef822018-12-18 09:26:39 +00001777 auto filterTensorAndData = CreateConstTensor(inputs[1],
1778 filterTensorInfo,
1779 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001780 armnn::IConnectableLayer* layer;
1781 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1782
1783 if (inputs.size() == 3)
1784 {
1785 desc.m_BiasEnabled = true;
1786 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001787 auto biasTensorAndData = CreateConstTensor(inputs[2],
1788 biasTensorInfo,
1789 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001790 layer = m_Network->AddFullyConnectedLayer(desc,
1791 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001792 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001793 layerName.c_str());
1794 }
1795 else
1796 {
1797 layer = m_Network->AddFullyConnectedLayer(desc,
1798 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001799 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001800 layerName.c_str());
1801 }
1802 BOOST_ASSERT(layer != nullptr);
1803
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001804 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1805
1806 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1807
1808 if (inputTensorInfo.GetNumDimensions() > 2)
1809 {
1810 // Add reshape to flatten to 2D [batch_size, input_size],
1811 // where "input_size" corresponds to the number of inputs to the layer,
1812 // matching the second dimension of weights,
1813 // and "batch_size" is calculated by dividing the number of elements by "input_size".
1814 std::vector<unsigned int> reshapedDimensions(2);
1815 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
1816 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
1817
1818 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
1819 {
1820 throw ParseException(
1821 boost::str(
1822 boost::format(
1823 "Failed to deduce input tensor shape from filter size %1%")
1824 % reshapedDimensions[1]
1825 % CHECK_LOCATION().AsString()));
1826 }
1827
1828 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
1829 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
1830
1831 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
1832 armnn::ReshapeDescriptor desc;
1833 desc.m_TargetShape = reshapedTensorInfo.GetShape();
1834 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
1835
1836 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
1837 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
1838
1839 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
1840 }
1841 else
1842 {
1843 // register the input connection slot for the layer
1844 // only the tensors for the inputs are relevant, exclude the const tensors
1845 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1846 }
1847
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001848 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1849 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1850
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001851 // we need to add the activation layer and fortunately we don't need to care about the data layout
1852 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1853 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001854
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001855 // register the output connection slots for the layer, connections are made after all layers have been created
1856 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1857 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1858}
1859
/// Parses the TfLite custom operator TFLite_Detection_PostProcess into an ArmNN
/// DetectionPostProcess layer. The operator's parameters arrive as a flexbuffers
/// map in custom_options rather than as builtin options, and the four output
/// shapes are not stored in the model, so they are computed here and recorded
/// in m_OverridenOutputShapes.
void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    // Outputs: detection boxes, detection classes, detection scores, num detections.
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    // NOTE(review): these keys are read without IsNull() checks, so a model
    // missing one of them gets the flexbuffers default (0 / 0.0f) — presumably
    // the exporter always emits them; verify against the TfLite exporter.
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two keys are optional; keep the descriptor defaults when absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    // IoU threshold must lie in (0, 1].
    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // inputs[2] holds the constant anchor boxes.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
                                                                       layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1 });

    // Apply the computed shapes to each of the four output slots.
    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
1937
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01001938/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
1939void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
1940{
1941 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1942
1943 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1944 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1945 CHECK_VALID_SIZE(outputs.size(), 1);
1946
1947 if (inputs.size() < 1)
1948 {
1949 throw ParseException("Pack must have at least one input.");
1950 }
1951
1952 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1953 const auto* options = operatorPtr->builtin_options.AsPackOptions();
1954
1955 StackDescriptor desc;
1956 desc.m_Axis = static_cast<uint32_t>(options->axis);
1957 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
1958
1959 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
1960 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1961 desc.m_InputShape = inputTensorInfo.GetShape();
1962
1963 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
1964 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
1965
1966 BOOST_ASSERT(layer != nullptr);
1967
1968 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1969 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1970
1971 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1972 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
1973
1974 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1975 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1976}
1977
Nina Drozd200e3802019-04-15 09:47:39 +01001978void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
1979{
1980 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1981
1982 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1983 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
1984
1985 // This unpackAxis indicates the axis to unpack
1986 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
1987
1988 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1989 CHECK_VALID_SIZE(inputs.size(), 1);
1990
1991 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001992
1993 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
1994 {
1995 throw ParseException(
1996 boost::str(
1997 boost::format(
1998 "The unpack axis: %1% cannot be greater than or equal to "
1999 "the number of input dimension %2% %3%")
2000 % unpackAxis
2001 % inputTensorInfo.GetNumDimensions()
2002 % CHECK_LOCATION().AsString()));
2003 }
2004
Nina Drozd200e3802019-04-15 09:47:39 +01002005 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2006 // If num is not defined, automatically infer from the length of the dimension axis.
2007 if(unpackNum == 0)
2008 {
2009 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2010 }
2011
2012 // If unpack number cannot be inferred and is still zero, throw ParseException.
2013 if(unpackNum == 0)
2014 {
2015 throw ParseException("Number to unpack must greater than zero.");
2016 }
2017
2018 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2019 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2020
2021 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2022 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2023
2024 // Add current input shape to unpackDimSizes
2025 for (unsigned int i = 0; i < inputDimSize; ++i)
2026 {
2027 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2028 }
2029
2030 if (unpackDimSizes[unpackAxis] != unpackNum)
2031 {
2032 throw ParseException("Number to unpack must be the same as length of the dimension to "
2033 "unpack along.");
2034 }
2035
2036 unpackDimSizes[unpackAxis] /= unpackNum;
2037
2038 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2039 for (unsigned int j = 0; j < unpackNum; ++j)
2040 {
2041 // Set the size of the views.
2042 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2043 {
2044 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2045 }
2046 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2047 }
2048
2049 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2050 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2051
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002052 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2053 unpackDimSizes.data());
2054
Nina Drozd200e3802019-04-15 09:47:39 +01002055 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2056 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2057
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002058 // Reshape to remove unpacked dimension
2059 unsigned int reshapedNumDimensions = inputDimSize - 1;
2060 std::vector<unsigned int> reshapedDimensions(reshapedNumDimensions);
Nina Drozd200e3802019-04-15 09:47:39 +01002061
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002062 unsigned int reshapeIndex = 0;
2063 for (unsigned int i = 0; i < inputDimSize; ++i)
Nina Drozd200e3802019-04-15 09:47:39 +01002064 {
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002065 if (i == unpackAxis)
2066 {
2067 continue;
2068 }
2069 reshapedDimensions[reshapeIndex++] = unpackDimSizes[i];
Nina Drozd200e3802019-04-15 09:47:39 +01002070 }
2071
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002072 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2073 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2074 {
2075 armnn::TensorInfo reshapedTensorInfo = inputTensorInfo;
2076 reshapedTensorInfo.SetShape(armnn::TensorShape{ reshapedNumDimensions, reshapedDimensions.data() });
2077
2078 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2079 armnn::ReshapeDescriptor desc;
2080 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2081 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2082
2083 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape, inputTensorInfo.GetDataType()));
2084 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2085
2086 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2087
2088 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2089 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2090 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2091 }
Nina Drozd200e3802019-04-15 09:47:39 +01002092}
2093
Nina Drozd0324f482019-04-08 10:52:10 +01002094void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2095{
2096 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2097
2098 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2099 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2100
2101 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2102
Nina Drozd200e3802019-04-15 09:47:39 +01002103 // If number of splits cannot be inferred and is zero, throw ParseException.
2104 if(numSplits == 0)
2105 {
2106 throw ParseException("Number to splits must greater than zero.");
2107 }
2108
Nina Drozd0324f482019-04-08 10:52:10 +01002109 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2110 CHECK_VALID_SIZE(inputs.size(), 2);
2111 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2112 CHECK_VALID_SIZE(outputs.size(), numSplits);
2113
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002114 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2115 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002116
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002117 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2118 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2119 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2120
2121 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2122 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002123
2124 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2125 if (splitDim == 0 || splitDim == 2)
2126 {
2127 throw ParseException(
2128 boost::str(
2129 boost::format(
2130 "Dimension %1% for split is not supported by Armnn. %2%")
2131 % splitDim
2132 % CHECK_LOCATION().AsString()));
2133 }
2134
2135 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002136 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002137 {
2138 throw ParseException(
2139 boost::str(
2140 boost::format(
2141 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002142 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002143 % inputTensorInfo.GetNumDimensions()
2144 % MaxNumOfTensorDimensions
2145 % CHECK_LOCATION().AsString()));
2146 }
2147
2148 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2149
2150 // Add current input shape to splitterDimSizes
2151 for (unsigned int i = 0; i < inputDimSize; ++i)
2152 {
2153 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2154 }
2155
2156 if (splitterDimSizes[splitDim] % numSplits != 0)
2157 {
2158 throw ParseException("Number of splits must evenly divide the dimension");
2159 }
2160 splitterDimSizes[splitDim] /= numSplits;
2161
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002162 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002163 for (unsigned int j = 0; j < numSplits; ++j)
2164 {
2165 // Set the size of the views.
2166 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2167 {
2168 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2169 }
2170 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2171 }
2172
2173 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2174 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2175
2176 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002177 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002178
2179 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2180 splitterDimSizes.data());
2181
2182 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2183 {
2184 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
2185 inputTensorInfo.GetDataType()));
2186 }
2187
2188 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2189 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2190}
2191
Sadik Armagan58f39192018-09-17 14:14:39 +01002192armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2193 unsigned int outputSlot,
2194 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002195{
2196 ActivationDescriptor activationDesc;
2197 std::string layerName = prevLayer->GetName();
2198
2199 switch(activationType)
2200 {
2201 case tflite::ActivationFunctionType_NONE:
2202 {
2203 // this is a no-op: return previous layer
2204 return prevLayer;
2205 }
2206 case tflite::ActivationFunctionType_RELU:
2207 {
2208 activationDesc.m_Function = ActivationFunction::ReLu;
2209 layerName += ":RELU";
2210 break;
2211 }
2212 case tflite::ActivationFunctionType_RELU6:
2213 {
2214 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2215 activationDesc.m_A = 6.0f;
2216 activationDesc.m_B = 0.0f;
2217 layerName += ":RELU6";
2218 break;
2219 }
2220 case tflite::ActivationFunctionType_TANH:
2221 {
2222 activationDesc.m_Function = ActivationFunction::TanH;
2223 activationDesc.m_A = 1.0f;
2224 activationDesc.m_B = 1.0f;
2225 layerName += ":TANH";
2226 break;
2227 }
2228
2229 // I only put these here as a reminder what others we could support
2230 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2231 case tflite::ActivationFunctionType_SIGN_BIT:
2232 default:
2233 {
2234 throw ParseException(
2235 boost::str(
2236 boost::format("TfLite parser doesn't suppport fused activation: "
2237 "%1%/%2% %3% ") %
2238 activationType %
2239 tflite::EnumNameActivationFunctionType(activationType) %
2240 CHECK_LOCATION().AsString()));
2241
2242 }
2243 }
2244
2245 IConnectableLayer* activationLayer =
2246 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2247
2248 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2249 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2250 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2251 return activationLayer;
2252}
2253
2254TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2255{
2256 if (fileName == nullptr)
2257 {
2258 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2259 CHECK_LOCATION().AsString()));
2260 }
2261 boost::system::error_code errorCode;
2262 boost::filesystem::path pathToFile(fileName);
2263 if (!boost::filesystem::exists(pathToFile, errorCode))
2264 {
2265 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2266 fileName %
2267 errorCode %
2268 CHECK_LOCATION().AsString()));
2269 }
2270 std::ifstream file(fileName, std::ios::binary);
2271 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2272 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2273 fileContent.size());
2274}
2275
2276TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2277{
2278 if (binaryContent == nullptr)
2279 {
2280 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2281 CHECK_LOCATION().AsString()));
2282 }
2283 flatbuffers::Verifier verifier(binaryContent, len);
2284 if (verifier.VerifyBuffer<tflite::Model>() == false)
2285 {
2286 throw ParseException(
2287 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2288 "flatbuffers format. size:%1% %2%") %
2289 len %
2290 CHECK_LOCATION().AsString()));
2291 }
2292 return tflite::UnPackModel(binaryContent);
2293}
2294
2295TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2296 size_t subgraphIndex,
2297 size_t operatorIndex)
2298{
2299 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2300
Derek Lambertiff05cc52019-04-26 13:05:17 +01002301 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2302 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002303
2304 size_t inputCount = operatorPtr->inputs.size();
2305 TensorRawPtrVector result(inputCount);
2306 for (size_t i=0; i<inputCount; ++i)
2307 {
2308 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002309 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002310 }
2311 return result;
2312}
2313
2314TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2315 size_t subgraphIndex,
2316 size_t operatorIndex)
2317{
2318 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2319
Derek Lambertiff05cc52019-04-26 13:05:17 +01002320 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2321 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002322
2323 size_t outputCount = operatorPtr->outputs.size();
2324 TensorRawPtrVector result(outputCount);
2325 for (size_t i=0; i<outputCount; ++i)
2326 {
2327 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2328 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002329 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002330 }
2331 return result;
2332}
2333
2334TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2335 size_t subgraphIndex)
2336{
2337 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002338 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002339
Derek Lambertiff05cc52019-04-26 13:05:17 +01002340 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002341 TensorIdRawPtrVector result(inputCount);
2342 for (size_t i=0; i<inputCount; ++i)
2343 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002344 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002345 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002346 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002347 }
2348 return result;
2349}
2350
2351TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2352 size_t subgraphIndex)
2353{
2354 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002355 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002356
Derek Lambertiff05cc52019-04-26 13:05:17 +01002357 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002358 TensorIdRawPtrVector result(outputCount);
2359 for (size_t i=0; i<outputCount; ++i)
2360 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002361 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2362 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002363 }
2364 return result;
2365}
2366
2367std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2368 size_t subgraphIndex,
2369 size_t operatorIndex)
2370{
2371 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002372 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2373 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002374 return operatorPtr->inputs;
2375}
2376
2377std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2378 size_t subgraphIndex,
2379 size_t operatorIndex)
2380{
2381 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002382 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2383 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002384 return operatorPtr->outputs;
2385}
2386
2387void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2388 size_t operatorIndex,
2389 IConnectableLayer* layer,
2390 const std::vector<unsigned int>& tensorIndexes)
2391{
2392 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2393 BOOST_ASSERT(layer != nullptr);
2394 if (tensorIndexes.size() != layer->GetNumInputSlots())
2395 {
2396 throw ParseException(
2397 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2398 " for subgraph:%3% operator index:%4% %5%") %
2399 tensorIndexes.size() %
2400 layer->GetNumInputSlots() %
2401 subgraphIndex %
2402 operatorIndex %
2403 CHECK_LOCATION().AsString()));
2404 }
2405
2406 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2407 {
2408 unsigned int tensorIndex = tensorIndexes[slotIndex];
2409 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2410 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2411 }
2412}
2413
2414void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2415 size_t operatorIndex,
2416 IConnectableLayer* layer,
2417 const std::vector<unsigned int>& tensorIndexes)
2418{
2419 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2420 BOOST_ASSERT(layer != nullptr);
2421 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2422 {
2423 throw ParseException(
2424 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2425 " for subgraph:%3% operator index:%4% %5%") %
2426 tensorIndexes.size() %
2427 layer->GetNumOutputSlots() %
2428 subgraphIndex %
2429 operatorIndex %
2430 CHECK_LOCATION().AsString()));
2431 }
2432
2433 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2434 {
2435 unsigned int tensorIndex = tensorIndexes[slotIndex];
2436 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2437 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2438 }
2439}
2440
2441void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2442{
2443 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2444
2445 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2446 for (auto const & tensorIdAndPtr : inputs)
2447 {
2448 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2449 IConnectableLayer* layer =
2450 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2451
2452 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2453 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2454
2455 RegisterOutputSlots(subgraphIndex,
2456 VIRTUAL_OPERATOR_ID,
2457 layer,
2458 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2459 }
2460}
2461
2462void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2463{
2464 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2465
2466 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2467 for (auto const & tensorIdAndPtr : outputs)
2468 {
2469 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2470 IConnectableLayer* layer =
2471 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2472
2473 RegisterInputSlots(subgraphIndex,
2474 VIRTUAL_OPERATOR_ID,
2475 layer,
2476 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2477 }
2478}
2479
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002480void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2481{
2482 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2483
Derek Lambertiff05cc52019-04-26 13:05:17 +01002484 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002485 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2486 {
2487 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2488 {
2489 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2490 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2491 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002492 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002493 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2494 auto tensorAndData = CreateConstTensor(tensorPtr,
2495 tensorInfo,
2496 armnn::Optional<armnn::PermutationVector&>());
2497
2498 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2499 IConnectableLayer *layer =
2500 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2501
2502 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2503 RegisterOutputSlots(subgraphIndex,
2504 VIRTUAL_OPERATOR_ID,
2505 layer,
2506 { tensorIndex });
2507
2508 }
2509 }
2510 }
2511}
2512
telsoa01c577f2c2018-08-31 09:22:23 +01002513// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2514TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2515{
2516 CHECK_BUFFER(model, bufferIndex);
2517 return model->buffers[bufferIndex].get();
2518}
2519
Matteo Martincigh747ef822018-12-18 09:26:39 +00002520template<typename T>
2521std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2522TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2523 TfLiteParser::TensorRawPtr tensorPtr,
2524 armnn::TensorInfo& tensorInfo,
2525 armnn::Optional<armnn::PermutationVector&> permutationVector)
2526{
2527 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2528 tensorPtr,
2529 tensorInfo,
2530 permutationVector);
2531 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2532 return std::make_pair(constData.first, std::move(storage));
2533}
2534
telsoa01c577f2c2018-08-31 09:22:23 +01002535std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2536TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002537 armnn::TensorInfo& tensorInfo,
2538 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002539{
2540 CHECK_TENSOR_PTR(tensorPtr);
2541 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2542 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2543
2544 switch (tensorInfo.GetDataType())
2545 {
2546 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002547 return CreateConstTensorAndStoreData<float>(bufferPtr,
2548 tensorPtr,
2549 tensorInfo,
2550 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002551 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002552 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2553 tensorPtr,
2554 tensorInfo,
2555 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002556 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002557 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2558 tensorPtr,
2559 tensorInfo,
2560 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002561 default:
2562 {
2563 std::stringstream errString;
2564 errString << "Unexpected datatype when creating const tensor: "
2565 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2566 << " shape:" << tensorInfo.GetShape()
2567 << CHECK_LOCATION().AsString();
2568 throw ParseException(errString.str());
2569 }
2570 }
2571}
2572
2573BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2574 const std::string& name) const
2575{
2576 CHECK_SUBGRAPH(m_Model, subgraphId);
2577 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2578 for (auto const & input : inputs)
2579 {
2580 if (input.second->name == name)
2581 {
2582 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2583 return std::make_pair(bindingId, ToTensorInfo(input.second));
2584 }
2585 }
2586
2587 std::stringstream bindings;
2588 for (auto const & input : inputs)
2589 {
2590 bindings << "'" << input.second->name << "' ";
2591 }
2592
2593 throw ParseException(
2594 boost::str(
2595 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2596 "Possible inputs are: [%3%] %4%") %
2597 subgraphId %
2598 name %
2599 bindings.str() %
2600 CHECK_LOCATION().AsString()));
2601}
2602
2603BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2604 const std::string& name) const
2605{
2606 CHECK_SUBGRAPH(m_Model, subgraphId);
2607 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002608 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002609 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002610 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002611 if (output.second->name == name)
2612 {
2613 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002614 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2615 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2616 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002617 }
2618 }
2619
2620 std::stringstream bindings;
2621 for (auto const & output : outputs)
2622 {
2623 bindings << "'" << output.second->name << "' ";
2624 }
2625
2626 throw ParseException(
2627 boost::str(
2628 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2629 "Possible outputs are: [%3%] %4%") %
2630 subgraphId %
2631 name %
2632 bindings.str() %
2633 CHECK_LOCATION().AsString()));
2634}
2635
2636size_t TfLiteParser::GetSubgraphCount() const
2637{
2638 return m_Model->subgraphs.size();
2639}
2640
2641std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2642{
2643 CHECK_SUBGRAPH(m_Model, subgraphId);
2644 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2645 std::vector<std::string> result;
2646 result.reserve(inputs.size());
2647 for (auto const & input : inputs)
2648 {
2649 result.push_back(input.second->name);
2650 }
2651 return result;
2652}
2653
2654std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2655{
2656 CHECK_SUBGRAPH(m_Model, subgraphId);
2657 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2658 std::vector<std::string> result;
2659 result.reserve(outputs.size());
2660 for (auto const & output : outputs)
2661 {
2662 result.push_back(output.second->name);
2663 }
2664 return result;
2665}
2666
2667ITfLiteParser* ITfLiteParser::CreateRaw()
2668{
2669 return new TfLiteParser();
2670}
2671
2672ITfLiteParserPtr ITfLiteParser::Create()
2673{
2674 return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
2675}
2676
2677void ITfLiteParser::Destroy(ITfLiteParser* parser)
2678{
2679 delete parser;
2680}
2681
2682TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
2683: m_FloatData(std::move(data))
2684, m_Uint8Data(nullptr)
2685, m_Int32Data(nullptr)
2686{
2687}
2688
2689TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2690: m_FloatData(nullptr)
2691, m_Uint8Data(std::move(data))
2692, m_Int32Data(nullptr)
2693{
2694}
2695
2696TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2697: m_FloatData(nullptr)
2698, m_Uint8Data(nullptr)
2699, m_Int32Data(std::move(data))
2700{
2701}
2702
2703} // armnnTfLiteParser