//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TfLiteParser.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>

// armnnUtils:
#include <ParserHelper.hpp>
#include <Permute.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/cast.hpp>

#include <fstream>
#include <algorithm>
#include <limits>
#include <numeric>
#include <flatbuffers/flexbuffers.h>

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{
namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                              location.m_Function %
                              subgraphIndex %
                              location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                              location.m_Function %
                              subgraphIndex %
                              location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParser::ModelPtr & model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid operator index. "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // The model and the subgraph index are assumed to have been validated
    // already by CHECK_MODEL, so asserts are sufficient here.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // The tensor index is the only thing left to check.
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              tensorIndex %
                              location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
                    const CheckLocation & location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with a null tensor pointer. "
                              "at %2%") %
                              location.m_Function %
                              location.FileLine()));
    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParser::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "buffer:%2% at %3%") %
                              location.m_Function %
                              bufferIndex %
                              location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid buffer index. "
                              "buffer index:%2% at %3%") %
                              location.m_Function %
                              bufferIndex %
                              location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("The buffer #%1% is null. %2%") %
                bufferIndex %
                location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                bufferId %
                location.AsString()));
    }
    else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
            tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                                  OPTION->fused_activation_function % \
                                  tflite::EnumNameActivationFunctionType(\
                                      OPTION->fused_activation_function) % \
                                  __func__ % \
                                  SUBGRAPH_INDEX % \
                                  OPERATOR_INDEX % \
                                  CHECK_LOCATION().FileLine())); \
        } \
    } while(false)


std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto & i : in)
    {
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}

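// Computes the explicit front/back padding that reproduces TfLite's SAME padding
// scheme: outputSize = ceil(inputSize / stride), and the total padding needed is
// (outputSize - 1) * stride + dilatedFilterSize - inputSize, split as evenly as
// possible with any odd remainder going to the back/bottom/right.
// Illustrative example (values chosen for this comment only): inputSize = 5,
// filterSize = 3, stride = 2, dilation = 1 gives outputSize = 3, total padding = 2,
// so paddingFront = 1 and paddingBack = 1. For Padding_VALID both stay 0.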
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}

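// Translates a TfLite tensor description into an armnn::TensorInfo: the TfLite element
// type is mapped to the corresponding ArmNN DataType and, when per-tensor quantization
// parameters are present, the scale and zero point are carried over (the 64-bit zero
// point is truncated to 32 bits). An empty shape is treated as a single-element tensor
// of shape {1}.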
armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                                  tensorPtr->type %
                                  tflite::EnumNameTensorType(tensorPtr->type) %
                                  tensorPtr->name %
                                  location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32,
            // but this is what ArmNN supports at the moment
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    std::vector<unsigned int> safeShape = shapes;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
    }

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(safeShape.size()),
                             safeShape.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions);
}

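// Copies the raw constant data of a TfLite buffer into freshly allocated storage and
// wraps it in an armnn::ConstTensor. When a non-empty permutation vector is supplied
// (for example to rearrange weight layouts), the tensor info and the data are permuted
// accordingly; otherwise the bytes are copied verbatim. The returned unique_ptr owns
// the storage and must outlive the ConstTensor that references it.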
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // generate the binding id by shifting the tensor id left by 8 bits
    // and adding the subgraph id, which allows up to 256 subgraphs
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

} // <anonymous>

TfLiteParser::TfLiteParser()
: m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
}

void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}

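// Helper used by binary elementwise operators (e.g. Maximum/Minimum) when the two
// inputs have a different number of dimensions: the lower-rank input is reshaped to
// the higher rank by prepending dimensions of size 1, a Reshape layer is inserted in
// front of the given layer's input slot 0, and the higher-rank input is registered on
// input slot 1, so that ArmNN's broadcasting rules can then be applied.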
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}

INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}

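// Walks each subgraph of the loaded model (currently exactly one is supported) operator
// by operator, dispatching each to its registered parse function. Parse failures are
// collected rather than aborting immediately, so the final exception reports every
// unsupported or malformed operator in one message. Input, output and constant layers
// are set up per subgraph, and the recorded producer/consumer slots are connected at
// the end.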
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                              m_Model->subgraphs.size() %
                              CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubgraphPtr const & subgraph : m_Model->subgraphs)
    {
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                                          builtinCode %
                                          tflite::BuiltinOperator_MAX %
                                          subgraphIndex %
                                          operatorIndex %
                                          CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
            boost::format("Another layer has already registered itself as the producer of "
                          "subgraph:%1% tensor:%2% %3%") %
                          subgraphIndex %
                          tensorIndex %
                          CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    throw ParseException(
        boost::str(
            boost::format("Operator not supported. "
                          "subgraph:%1% operator:%2% "
                          "opcode_index:%3% opcode:%4% / %5% %6%") %
                          subgraphIndex %
                          operatorIndex %
                          opcodeIndex %
                          opcode %
                          tflite::EnumNameBuiltinOperator(opcode) %
                          CHECK_LOCATION().AsString()));
}

void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);

    PermuteDescriptor desc;

    layer = m_Network->AddPermuteLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);

    layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                      filterTensorAndData.first,
                                                      EmptyOptional(),
                                                      layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}

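// BATCH_TO_SPACE_ND carries its block shape and crops as constant input tensors. Both
// are read directly from the model buffers here; the flat crops data is then regrouped
// into [begin, end] pairs, one pair per spatial dimension, before being placed into the
// BatchToSpaceNdDescriptor (the data layout is assumed to be NHWC).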
void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }

    armnn::BatchToSpaceNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_Crops = crops;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}

void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();

    SoftmaxDescriptor desc;
    desc.m_Beta = options->beta;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

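// Computes the output shape of a SQUEEZE operation: dimensions of size 1 that appear in
// squeezeDimsIn are removed; when squeezeDimsIn is empty, every size-1 dimension is
// removed. Type and quantization parameters of the input TensorInfo are preserved.
// E.g. (illustrative values only) an input of shape [1, 2, 1, 3] with empty squeeze
// dims becomes [2, 3], while squeeze dims {0} gives [2, 1, 3].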
telsoa01c577f2c2018-08-31 09:22:23 +01001269armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1270 const armnn::TensorInfo & inputTensorInfo)
1271{
1272 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1273 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1274 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1275
1276 if (inputTensorInfo.GetNumDimensions() > 4)
1277 {
1278 std::stringstream ss;
1279 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1280 << " shape:" << inputTensorInfo.GetShape() << " "
1281 << CHECK_LOCATION().AsString();
1282 throw ParseException(ss.str());
1283 }
1284
1285 if (squeezeDims.empty())
1286 {
1287 squeezeDims.assign(dimensionSequence,
1288 dimensionSequence+inputTensorInfo.GetNumDimensions());
1289 }
1290
1291 std::vector<uint32_t> outputDims;
1292 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1293 {
1294 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1295 auto currentDimension = inputTensorInfo.GetShape()[i];
1296 if (skipSqueeze || currentDimension != 1)
1297 {
1298 outputDims.push_back(currentDimension);
1299 }
1300 }
1301
1302 if (outputDims.size() > 4)
1303 {
1304 std::stringstream ss;
1305        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
1306           << " input shape:" << inputTensorInfo.GetShape() << " "
1307 << CHECK_LOCATION().AsString();
1308 throw ParseException(ss.str());
1309 }
1310
1311 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1312 outputDims.data());
1313
1314 // we need to preserve the tensor type and the quantization data as well
1315 TensorInfo outTensorInfo = inputTensorInfo;
1316 outTensorInfo.SetShape(outShape);
1317
1318 return outTensorInfo;
1319}
1320
1321void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1322{
1323 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1324
1325 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1326 CHECK_VALID_SIZE(inputs.size(), 1);
1327
1328 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1329 CHECK_VALID_SIZE(outputs.size(), 1);
1330
1331 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1332 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1333
1334 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1335 armnn::TensorInfo outputTensorInfo =
1336 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1337 inputTensorInfo);
1338
1339 ReshapeDescriptor reshapeDesc;
1340 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1341
1342 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1343 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1344 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1345
1346 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1347 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1348
1349 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1350 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1351}
1352
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001353void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1354{
1355 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1356
1357 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1358 CHECK_VALID_SIZE(inputs.size(), 4);
1359
1360 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1361 CHECK_VALID_SIZE(outputs.size(), 1);
1362
1363 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1364 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1365
1366 StridedSliceDescriptor desc;
1367 desc.m_BeginMask = options->begin_mask;
1368 desc.m_EllipsisMask = options->ellipsis_mask;
1369 desc.m_EndMask = options->end_mask;
1370 desc.m_NewAxisMask = options->new_axis_mask;
1371 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1372 desc.m_DataLayout = armnn::DataLayout::NHWC;
1373
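    // The begin, end and stride values come from constant input tensors 1, 2 and 3.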
1374 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1375 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1376
1377 std::vector<int> begin(beginTensorInfo.GetNumElements());
1378 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1379
1380 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1381 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1382
1383 std::vector<int> end(endTensorInfo.GetNumElements());
1384 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1385
1386 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1387 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1388
1389 std::vector<int> stride(strideTensorInfo.GetNumElements());
1390 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1391
1392 desc.m_Begin = begin;
1393 desc.m_End = end;
1394 desc.m_Stride = stride;
1395
1396 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1397 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1398
1399 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1400 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1401
1402 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1403 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1404
1405 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1406 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1407}
1408
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001409void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1410{
1411 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1412
1413 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1414 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1415
1416 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1417 CHECK_VALID_SIZE(inputs.size(), 2);
1418
1419 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1420 CHECK_VALID_SIZE(outputs.size(), 1);
1421
1422 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1423 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1424
1425 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1426 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1427
1428 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1429 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1430
1431 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
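    // If the two inputs have different ranks, insert a broadcast reshape; otherwise connect both inputs directly.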
1432 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1433 {
1434 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1435 }
1436 else
1437 {
1438 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1439 }
1440
1441 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1442
1443 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1444 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1445}
1446
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001447void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1448{
1449 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1450
1451 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1452 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1453
1454 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1455 CHECK_VALID_SIZE(inputs.size(), 2);
1456
1457 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1458 CHECK_VALID_SIZE(outputs.size(), 1);
1459
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001460 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1461 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1462
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001463 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1464 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1465
1466 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1467 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1468
1469 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001470 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1471 {
1472 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1473 }
1474 else
1475 {
1476 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1477 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001478
1479 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1480
1481 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1482 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1483}
1484
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001485void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1486{
1487 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1488
1489 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1490 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1491
1492 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1493 CHECK_VALID_SIZE(inputs.size(), 2);
1494
1495 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1496 CHECK_VALID_SIZE(outputs.size(), 1);
1497
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001498 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1499 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1500
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001501 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1502 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1503
1504 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1505 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1506
1507 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001508 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1509 {
1510 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1511 }
1512 else
1513 {
1514 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1515 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001516
1517 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1518
1519 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1520 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1521}
1522
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001523void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1524{
1525 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1526
1527 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1528
1529 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1530 CHECK_VALID_SIZE(outputs.size(), 1);
1531
1532 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1533 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1534
1535 armnn::MeanDescriptor desc;
1536 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1537 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1538 desc.m_Axis = axis;
1539
1540 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1541 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1542
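    // keep_dims is inferred rather than read from the options: it is true when the output rank matches the input rank.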
1543    desc.m_KeepDims =
1544        inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1546
1547 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1548 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1549
1550 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1551
1552 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1553 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1554
1555 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1556 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1557}
1558
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001559void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1560{
1561 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1562
1563 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1564
1565 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1566 CHECK_VALID_SIZE(outputs.size(), 1);
1567
1568 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1569 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1570
1571 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1572 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1573
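    // Group the flattened padding values into (before, after) pairs, one per input dimension.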
1574 size_t step = 2;
1575 armnn::PadDescriptor desc;
1576 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1577 {
1578 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1579 }
1580
1581 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1582 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1583
1584 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1585 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1586
1587 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1588 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1589
1590 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1591 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1592}
1593
Finn Williamsc42c3842019-01-22 14:18:11 +00001594
Sadik Armagan58f39192018-09-17 14:14:39 +01001595void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1596{
Finn Williamsc42c3842019-01-22 14:18:11 +00001597    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001598}
1599
1600void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1601{
Finn Williamsc42c3842019-01-22 14:18:11 +00001602    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::BoundedReLu);
1603}
Sadik Armagan58f39192018-09-17 14:14:39 +01001604
Finn Williamsc42c3842019-01-22 14:18:11 +00001605void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1606{
1607    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Sigmoid);
1608}
1609
Nina Drozd99851762019-04-09 09:37:38 +01001610void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1611{
1612    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::TanH);
1613}
1614
Finn Williamsc42c3842019-01-22 14:18:11 +00001615
1616void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1617{
1618 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001619 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1620 boost::ignore_unused(operatorPtr);
1621
1622 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1623 CHECK_VALID_SIZE(inputs.size(), 1);
1624
1625 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1626 CHECK_VALID_SIZE(outputs.size(), 1);
1627
Finn Williamsc42c3842019-01-22 14:18:11 +00001628 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001629 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001630 activationDesc.m_Function = activationType;
1631
1632 switch (activationType)
1633 {
1634 case ActivationFunction::ReLu:
1635 {
1636 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1637 break;
1638 }
1639 case ActivationFunction::BoundedReLu:
1640 {
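            // RELU6 maps to BoundedReLu with an upper bound (m_A) of 6 and a lower bound (m_B) of 0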
1641 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1642 activationDesc.m_A = 6.0f;
1643 activationDesc.m_B = 0.0f;
1644 break;
1645 }
1646 case ActivationFunction::Sigmoid:
1647 {
1648 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1649 break;
1650 }
Nina Drozd99851762019-04-09 09:37:38 +01001651 case ActivationFunction::TanH:
1652 {
1653 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1654 activationDesc.m_A = 1.0f;
1655 activationDesc.m_B = 1.0f;
1656 break;
1657 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001658 default:
1659 {
1660 throw ParseException(
1661 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1662 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1663 }
1664 }
1665
1666 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001667
1668 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1669 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1670
1671 // register the input connection slots for the layer, connections are made after all layers have been created
1672 // only the tensors for the inputs are relevant, exclude the const tensors
1673 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1674 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1675
1676 // register the output connection slots for the layer, connections are made after all layers have been created
1677 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1678 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1679}
Sadikb94967b2018-09-19 15:30:00 +01001680armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1681 const std::vector<int32_t> & targetDimsIn)
1682{
1683 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1684 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1685
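    // A single -1 in the target shape acts as a wildcard; its size is inferred from the remaining elements.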
1686 if (stretchDim != targetDimsIn.end())
1687 {
1688 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1689 {
1690 throw ParseException(
1691 boost::str(
1692 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1693 }
1694
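        // Starting the product at -1 lets the -1 wildcard in the target dims cancel out,
        // leaving the product of the known dimensions.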
1695 auto targetNumElements =
1696 boost::numeric_cast<unsigned int>(
1697 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1698
1699 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1700 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1701 }
1702
1703 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1704
1705 TensorInfo reshapeInfo = inputTensorInfo;
1706 reshapeInfo.SetShape(outputShape);
1707
1708 return reshapeInfo;
1709}
1710
1711void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1712{
1713 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1714
1715 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001716
1717 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1718 CHECK_VALID_SIZE(outputs.size(), 1);
1719
1720 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1721 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1722
1723 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001724 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1725 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001726 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1727
kevmay0171972a82018-12-17 14:28:03 +00001728 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001729 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1730 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001731 {
1732 std::stringstream ss;
1733 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001734 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001735 << " does not equal output shape "
1736 << actualOutputTensorInfo.GetShape()
1737 << ": "
1738 << CHECK_LOCATION().AsString();
1739 throw ParseException(ss.str());
1740 }
1741
Sadikb94967b2018-09-19 15:30:00 +01001742 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001743 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001744
1745 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1746 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001747 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001748
1749 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1750 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1751
1752 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1753 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1754}
1755
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001756void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1757{
1758 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1759
1760 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1761 CHECK_VALID_SIZE(inputs.size(), 2);
1762
1763 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1764 CHECK_VALID_SIZE(outputs.size(), 1);
1765
1766 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1767
1768 // Data for the parsed tensor args (size) must be stored locally.
1769 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1770
1771 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1772 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1773
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001774 ResizeDescriptor desc;
1775 desc.m_Method = armnn::ResizeMethod::Bilinear;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001776 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001777 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1778 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001779
1780 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001781 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001782
1783 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1784 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1785
1786 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1787 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1788
1789 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1790 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1791}
1792
Sadik Armagan479045b2018-10-01 11:51:37 +01001793void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1794{
1795 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1796
1797 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1798 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1799
1800 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1801
1802 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1803 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1804 CHECK_VALID_SIZE(outputs.size(), 1);
1805
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001806 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1807 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001808
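    // Normalise the concatenation axis so that a negative TfLite axis wraps around to a valid dimension index.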
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001809 const unsigned int concatDimInput = static_cast<unsigned int>(
1810 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01001811
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001812 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1813 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01001814
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001815 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001816
1817 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1818 {
1819 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1820
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001821        // This sets up the concatDescriptor view origins
1822 armnnUtils::ProcessConcatInputTensorInfo(
1823 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001824 }
1825
1826 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01001827 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01001828
1829 BOOST_ASSERT(layer != nullptr);
1830
1831 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1832 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001833
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001834 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001835
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001836 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001837
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001838 // add fused activation layer
1839 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001840
Sadik Armagan479045b2018-10-01 11:51:37 +01001841 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1842 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1843}
1844
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001845void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1846{
1847 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1848
1849 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1850 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1851
1852 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1853
1854 FullyConnectedDescriptor desc;
1855 desc.m_BiasEnabled = false;
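    // TfLite stores fully connected weights as [outputSize, inputSize], so the weight matrix is treated as transposed.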
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001856 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001857
1858 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1859 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1860 CHECK_VALID_SIZE(outputs.size(), 1);
1861
1862 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1863
1864    // The Fully Connected layer accepts two-dimensional weights input
1865 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1866 if (weightsDimension != 2)
1867 {
1868 throw ParseException(
1869 boost::str(
1870 boost::format(
1871 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1872 "Node %2%")
1873 % weightsDimension
1874 % CHECK_LOCATION().AsString()));
1875 }
1876
Matteo Martincigh747ef822018-12-18 09:26:39 +00001877 auto filterTensorAndData = CreateConstTensor(inputs[1],
1878 filterTensorInfo,
1879 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001880 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001881 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1882
1883 if (inputs.size() == 3)
1884 {
1885 desc.m_BiasEnabled = true;
1886 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001887 auto biasTensorAndData = CreateConstTensor(inputs[2],
1888 biasTensorInfo,
1889 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001890 layer = m_Network->AddFullyConnectedLayer(desc,
1891 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001892 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001893 layerName.c_str());
1894 }
1895 else
1896 {
1897 layer = m_Network->AddFullyConnectedLayer(desc,
1898 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001899 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001900 layerName.c_str());
1901 }
1902 BOOST_ASSERT(layer != nullptr);
1903
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001904 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1905
1906 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1907
1908 if (inputTensorInfo.GetNumDimensions() > 2)
1909 {
1910 // Add reshape to flatten to 2D [batch_size, input_size],
1911 // where "input_size" corresponds to the number of inputs to the layer,
1912 // matching the second dimension of weights,
1913 // and "batch_size" is calculated by dividing the number of elements by "input_size".
1914 std::vector<unsigned int> reshapedDimensions(2);
1915 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
1916 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
1917
1918 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
1919 {
1920 throw ParseException(
1921 boost::str(
1922 boost::format(
1923                        "Failed to deduce input tensor shape from filter size %1% %2%")
1924 % reshapedDimensions[1]
1925 % CHECK_LOCATION().AsString()));
1926 }
1927
1928 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
1929 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
1930
1931 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
1932 armnn::ReshapeDescriptor desc;
1933 desc.m_TargetShape = reshapedTensorInfo.GetShape();
1934        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
1935
1936 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
1937 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
1938
1939 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
1940 }
1941 else
1942 {
1943 // register the input connection slot for the layer
1944 // only the tensors for the inputs are relevant, exclude the const tensors
1945 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1946 }
1947
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001948 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1949 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1950
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001951 // we need to add the activation layer and fortunately we don't need to care about the data layout
1952 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1953 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001954
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001955 // register the output connection slots for the layer, connections are made after all layers have been created
1956 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1957 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1958}
1959
keidav011b3e2ea2019-02-21 10:07:37 +00001960void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1961{
1962 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1963
1964 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1965
1966 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1967 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1968 CHECK_VALID_SIZE(outputs.size(), 4);
1969
1970 // Obtain custom options from flexbuffers
1971 auto custom_options = operatorPtr->custom_options;
1972 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1973
1974 // Obtain descriptor information from tf lite
1975 DetectionPostProcessDescriptor desc;
1976 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1977 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1978 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1979 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1980 desc.m_NumClasses = m["num_classes"].AsUInt32();
1981 desc.m_ScaleH = m["h_scale"].AsFloat();
1982 desc.m_ScaleW = m["w_scale"].AsFloat();
1983 desc.m_ScaleX = m["x_scale"].AsFloat();
1984 desc.m_ScaleY = m["y_scale"].AsFloat();
1985
keidav0107d58c72019-02-26 11:57:39 +00001986 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00001987 {
keidav0107d58c72019-02-26 11:57:39 +00001988 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00001989 }
1990 if (!(m["detections_per_class"].IsNull()))
1991 {
1992 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1993 }
1994
1995 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1996 {
1997 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1998 "must be positive and less than or equal to 1.");
1999 }
2000
2001 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
2002 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
2003 armnn::Optional<armnn::PermutationVector&>());
2004
2005 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
2006 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
2007 layerName.c_str());
2008
2009 BOOST_ASSERT(layer != nullptr);
2010
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002011 // The model does not specify the output shapes.
2012 // The output shapes are calculated from the max_detection and max_classes_per_detection.
2013 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
2014 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2015 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2016 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2017 m_OverridenOutputShapes.push_back({ 1 });
2018
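    // Set the tensor info on the four outputs: detection boxes, detection classes, detection scores
    // and the number of detections.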
keidav011b3e2ea2019-02-21 10:07:37 +00002019 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
2020 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002021 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00002022 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
2023 }
2024
2025 // Register the input connection slots for the layer, connections are made after all layers have been created
2026 // only the tensors for the inputs are relevant, exclude the const tensors
2027 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2028 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2029
2030 // Register the output connection slots for the layer, connections are made after all layers have been created
2031 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2032 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2033 outputTensorIndexes[1],
2034 outputTensorIndexes[2],
2035 outputTensorIndexes[3]});
2036}
2037
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002038/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2039void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2040{
2041 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2042
2043 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2044 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2045 CHECK_VALID_SIZE(outputs.size(), 1);
2046
2047 if (inputs.size() < 1)
2048 {
2049 throw ParseException("Pack must have at least one input.");
2050 }
2051
2052 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2053 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2054
2055 StackDescriptor desc;
2056 desc.m_Axis = static_cast<uint32_t>(options->axis);
2057 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2058
2059 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2060 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2061 desc.m_InputShape = inputTensorInfo.GetShape();
2062
2063 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2064 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2065
2066 BOOST_ASSERT(layer != nullptr);
2067
2068 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2069 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2070
2071 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2072 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2073
2074 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2075 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2076}
2077
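/// The TfLite Unpack operator maps to an ArmNN Splitter followed by a Reshape on each output
/// that drops the unpacked dimension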
Nina Drozd200e3802019-04-15 09:47:39 +01002078void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2079{
2080 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2081
2082 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2083 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2084
2085    // unpackAxis indicates the axis along which to unpack
2086 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2087
2088 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2089 CHECK_VALID_SIZE(inputs.size(), 1);
2090
2091 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002092
2093 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2094 {
2095 throw ParseException(
2096 boost::str(
2097 boost::format(
2098 "The unpack axis: %1% cannot be greater than or equal to "
2099 "the number of input dimension %2% %3%")
2100 % unpackAxis
2101 % inputTensorInfo.GetNumDimensions()
2102 % CHECK_LOCATION().AsString()));
2103 }
2104
Nina Drozd200e3802019-04-15 09:47:39 +01002105 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2106 // If num is not defined, automatically infer from the length of the dimension axis.
2107 if(unpackNum == 0)
2108 {
2109 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2110 }
2111
2112 // If unpack number cannot be inferred and is still zero, throw ParseException.
2113 if(unpackNum == 0)
2114 {
2115        throw ParseException("Number to unpack must be greater than zero.");
2116 }
2117
2118 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2119 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2120
2121 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2122 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2123
2124 // Add current input shape to unpackDimSizes
2125 for (unsigned int i = 0; i < inputDimSize; ++i)
2126 {
2127 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2128 }
2129
2130 if (unpackDimSizes[unpackAxis] != unpackNum)
2131 {
2132        throw ParseException("Number to unpack must be the same as the length of the dimension to "
2133                             "unpack along.");
2134 }
2135
2136 unpackDimSizes[unpackAxis] /= unpackNum;
2137
2138 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2139 for (unsigned int j = 0; j < unpackNum; ++j)
2140 {
2141 // Set the size of the views.
2142 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2143 {
2144 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2145 }
2146 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2147 }
2148
2149 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2150 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2151
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002152 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2153 unpackDimSizes.data());
2154
Nina Drozd200e3802019-04-15 09:47:39 +01002155 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2156 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2157
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002158 // Reshape to remove unpacked dimension
2159 unsigned int reshapedNumDimensions = inputDimSize - 1;
2160 std::vector<unsigned int> reshapedDimensions(reshapedNumDimensions);
Nina Drozd200e3802019-04-15 09:47:39 +01002161
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002162 unsigned int reshapeIndex = 0;
2163 for (unsigned int i = 0; i < inputDimSize; ++i)
Nina Drozd200e3802019-04-15 09:47:39 +01002164 {
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002165 if (i == unpackAxis)
2166 {
2167 continue;
2168 }
2169 reshapedDimensions[reshapeIndex++] = unpackDimSizes[i];
Nina Drozd200e3802019-04-15 09:47:39 +01002170 }
2171
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002172 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2173 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2174 {
2175 armnn::TensorInfo reshapedTensorInfo = inputTensorInfo;
2176 reshapedTensorInfo.SetShape(armnn::TensorShape{ reshapedNumDimensions, reshapedDimensions.data() });
2177
2178 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2179 armnn::ReshapeDescriptor desc;
2180 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2181        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
2182
2183 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape, inputTensorInfo.GetDataType()));
2184 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2185
2186 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2187
2188 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2189 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2190 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2191 }
Nina Drozd200e3802019-04-15 09:47:39 +01002192}
2193
Nina Drozd0324f482019-04-08 10:52:10 +01002194void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2195{
2196 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2197
2198 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2199 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2200
2201 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2202
Nina Drozd200e3802019-04-15 09:47:39 +01002203    // A zero number of splits is invalid; throw ParseException.
2204    if(numSplits == 0)
2205    {
2206        throw ParseException("Number of splits must be greater than zero.");
2207    }
2208
Nina Drozd0324f482019-04-08 10:52:10 +01002209 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2210 CHECK_VALID_SIZE(inputs.size(), 2);
2211 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2212 CHECK_VALID_SIZE(outputs.size(), numSplits);
2213
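    // For TfLite Split, input 0 holds the split axis and input 1 is the tensor being split.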
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002214 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2215 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002216
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002217 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2218 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2219 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2220
2221 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2222 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002223
2224    // Armnn supports split only along the channel dimension (dim 1 for NCHW, dim 3 for NHWC).
2225 if (splitDim == 0 || splitDim == 2)
2226 {
2227 throw ParseException(
2228 boost::str(
2229 boost::format(
2230 "Dimension %1% for split is not supported by Armnn. %2%")
2231 % splitDim
2232 % CHECK_LOCATION().AsString()));
2233 }
2234
2235 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002236 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002237 {
2238 throw ParseException(
2239 boost::str(
2240 boost::format(
2241 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002242 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002243 % inputTensorInfo.GetNumDimensions()
2244 % MaxNumOfTensorDimensions
2245 % CHECK_LOCATION().AsString()));
2246 }
2247
2248 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2249
2250 // Add current input shape to splitterDimSizes
2251 for (unsigned int i = 0; i < inputDimSize; ++i)
2252 {
2253 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2254 }
2255
2256 if (splitterDimSizes[splitDim] % numSplits != 0)
2257 {
2258 throw ParseException("Number of splits must evenly divide the dimension");
2259 }
2260 splitterDimSizes[splitDim] /= numSplits;
2261
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002262 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002263 for (unsigned int j = 0; j < numSplits; ++j)
2264 {
2265 // Set the size of the views.
2266 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2267 {
2268 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2269 }
2270 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2271 }
2272
2273 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2274 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2275
2276 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002277 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002278
2279 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2280 splitterDimSizes.data());
2281
2282 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2283 {
2284 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
2285 inputTensorInfo.GetDataType()));
2286 }
2287
2288 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2289 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2290}
2291
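// Appends an activation layer to the given output slot of prevLayer when the TfLite operator carries a fused
// activation function; for ActivationFunctionType_NONE the previous layer is returned unchanged.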
Sadik Armagan58f39192018-09-17 14:14:39 +01002292armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2293 unsigned int outputSlot,
2294 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002295{
2296 ActivationDescriptor activationDesc;
2297 std::string layerName = prevLayer->GetName();
2298
2299 switch(activationType)
2300 {
2301 case tflite::ActivationFunctionType_NONE:
2302 {
2303 // this is a no-op: return previous layer
2304 return prevLayer;
2305 }
2306 case tflite::ActivationFunctionType_RELU:
2307 {
2308 activationDesc.m_Function = ActivationFunction::ReLu;
2309 layerName += ":RELU";
2310 break;
2311 }
2312 case tflite::ActivationFunctionType_RELU6:
2313 {
2314 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2315 activationDesc.m_A = 6.0f;
2316 activationDesc.m_B = 0.0f;
2317 layerName += ":RELU6";
2318 break;
2319 }
2320 case tflite::ActivationFunctionType_TANH:
2321 {
2322 activationDesc.m_Function = ActivationFunction::TanH;
2323 activationDesc.m_A = 1.0f;
2324 activationDesc.m_B = 1.0f;
2325 layerName += ":TANH";
2326 break;
2327 }
2328
2329        // These are listed here only as a reminder of which other fused activations we could support
2330 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2331 case tflite::ActivationFunctionType_SIGN_BIT:
2332 default:
2333 {
2334 throw ParseException(
2335 boost::str(
2336                    boost::format("TfLite parser doesn't support fused activation: "
2337 "%1%/%2% %3% ") %
2338 activationType %
2339 tflite::EnumNameActivationFunctionType(activationType) %
2340 CHECK_LOCATION().AsString()));
2341
2342 }
2343 }
2344
2345 IConnectableLayer* activationLayer =
2346 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2347
2348 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2349 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2350 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2351 return activationLayer;
2352}
2353
2354TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2355{
2356 if (fileName == nullptr)
2357 {
2358 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2359 CHECK_LOCATION().AsString()));
2360 }
2361 boost::system::error_code errorCode;
2362 boost::filesystem::path pathToFile(fileName);
2363 if (!boost::filesystem::exists(pathToFile, errorCode))
2364 {
2365 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2366 fileName %
2367 errorCode %
2368 CHECK_LOCATION().AsString()));
2369 }
2370 std::ifstream file(fileName, std::ios::binary);
2371 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2372 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2373 fileContent.size());
2374}
2375
2376TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2377{
2378 if (binaryContent == nullptr)
2379 {
2380 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2381 CHECK_LOCATION().AsString()));
2382 }
2383 flatbuffers::Verifier verifier(binaryContent, len);
2384 if (verifier.VerifyBuffer<tflite::Model>() == false)
2385 {
2386 throw ParseException(
2387 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2388 "flatbuffers format. size:%1% %2%") %
2389 len %
2390 CHECK_LOCATION().AsString()));
2391 }
2392 return tflite::UnPackModel(binaryContent);
2393}
2394
2395TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2396 size_t subgraphIndex,
2397 size_t operatorIndex)
2398{
2399 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2400
Derek Lambertiff05cc52019-04-26 13:05:17 +01002401 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2402 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002403
2404 size_t inputCount = operatorPtr->inputs.size();
2405 TensorRawPtrVector result(inputCount);
2406 for (size_t i=0; i<inputCount; ++i)
2407 {
2408 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002409 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002410 }
2411 return result;
2412}
2413
2414TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2415 size_t subgraphIndex,
2416 size_t operatorIndex)
2417{
2418 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2419
Derek Lambertiff05cc52019-04-26 13:05:17 +01002420 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2421 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002422
2423 size_t outputCount = operatorPtr->outputs.size();
2424 TensorRawPtrVector result(outputCount);
2425 for (size_t i=0; i<outputCount; ++i)
2426 {
2427 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2428 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002429 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002430 }
2431 return result;
2432}
2433
2434TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2435 size_t subgraphIndex)
2436{
2437 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002438 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002439
Derek Lambertiff05cc52019-04-26 13:05:17 +01002440 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002441 TensorIdRawPtrVector result(inputCount);
2442 for (size_t i=0; i<inputCount; ++i)
2443 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002444 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002445 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002446 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002447 }
2448 return result;
2449}
2450
2451TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2452 size_t subgraphIndex)
2453{
2454 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002455 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002456
Derek Lambertiff05cc52019-04-26 13:05:17 +01002457 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002458 TensorIdRawPtrVector result(outputCount);
2459 for (size_t i=0; i<outputCount; ++i)
2460 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002461 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2462 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002463 }
2464 return result;
2465}
2466
2467std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2468 size_t subgraphIndex,
2469 size_t operatorIndex)
2470{
2471 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002472 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2473 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002474 return operatorPtr->inputs;
2475}
2476
2477std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2478 size_t subgraphIndex,
2479 size_t operatorIndex)
2480{
2481 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002482 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2483 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002484 return operatorPtr->outputs;
2485}
2486
void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
                                      size_t operatorIndex,
                                      IConnectableLayer* layer,
                                      const std::vector<unsigned int>& tensorIndexes)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);
    if (tensorIndexes.size() != layer->GetNumInputSlots())
    {
        throw ParseException(
            boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
                                     " for subgraph:%3% operator index:%4% %5%") %
                       tensorIndexes.size() %
                       layer->GetNumInputSlots() %
                       subgraphIndex %
                       operatorIndex %
                       CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
    {
        unsigned int tensorIndex = tensorIndexes[slotIndex];
        armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
        RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}

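// Records each of the layer's output slots as the producer of the corresponding tensor so
// later consumers of that tensor can be connected to it.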
void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
                                       size_t operatorIndex,
                                       IConnectableLayer* layer,
                                       const std::vector<unsigned int>& tensorIndexes)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);
    if (tensorIndexes.size() != layer->GetNumOutputSlots())
    {
        throw ParseException(
            boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
                                     " for subgraph:%3% operator index:%4% %5%") %
                       tensorIndexes.size() %
                       layer->GetNumOutputSlots() %
                       subgraphIndex %
                       operatorIndex %
                       CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
    {
        unsigned int tensorIndex = tensorIndexes[slotIndex];
        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
        RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}

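// Adds an armnn InputLayer for every declared subgraph input and registers its output slot
// as the producer of the corresponding tensor.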
void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
    for (auto const & tensorIdAndPtr : inputs)
    {
        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
        IConnectableLayer* layer =
            m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());

        auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
        layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            layer,
                            { static_cast<uint32_t>(tensorIdAndPtr.first) });
    }
}

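// Adds an armnn OutputLayer for every declared subgraph output and registers its input slot
// as a consumer of the corresponding tensor.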
void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
    for (auto const & tensorIdAndPtr : outputs)
    {
        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
        IConnectableLayer* layer =
            m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());

        RegisterInputSlots(subgraphIndex,
                           VIRTUAL_OPERATOR_ID,
                           layer,
                           { static_cast<uint32_t>(tensorIdAndPtr.first) });
    }
}

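// Adds a ConstantLayer for every tensor in the subgraph that is consumed by at least one
// layer but has no producing operator; such tensors are backed by constant model data.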
void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];

    // Walk only the connections recorded for this subgraph: a tensor that has registered
    // consumers but no producing layer must hold constant data from the model.
    for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
    {
        if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
            m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
        {
            TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
            armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
            auto tensorAndData = CreateConstTensor(tensorPtr,
                                                   tensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());

            std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
            IConnectableLayer *layer =
                m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

            layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
            RegisterOutputSlots(subgraphIndex,
                                VIRTUAL_OPERATOR_ID,
                                layer,
                                { tensorIndex });
        }
    }
}

// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
{
    CHECK_BUFFER(model, bufferIndex);
    return model->buffers[bufferIndex].get();
}

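// Builds a ConstTensor of element type T from the given buffer via CreateConstTensorImpl and
// hands ownership of the resulting data copy to a SupportedDataStorage so the backing memory
// stays alive alongside the tensor.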
template<typename T>
std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
                                            TfLiteParser::TensorRawPtr tensorPtr,
                                            armnn::TensorInfo& tensorInfo,
                                            armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    auto constData = CreateConstTensorImpl<T>(bufferPtr,
                                              tensorPtr,
                                              tensorInfo,
                                              permutationVector);
    TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
    return std::make_pair(constData.first, std::move(storage));
}

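// Dispatches on the tensor's data type to create a ConstTensor together with the storage that
// owns its data; only Float32, QuantisedAsymm8 and Signed32 constants are supported here.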
std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
                                armnn::TensorInfo& tensorInfo,
                                armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    CHECK_TENSOR_PTR(tensorPtr);
    auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
    CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);

    switch (tensorInfo.GetDataType())
    {
        case armnn::DataType::Float32:
            return CreateConstTensorAndStoreData<float>(bufferPtr,
                                                        tensorPtr,
                                                        tensorInfo,
                                                        permutationVector);
        case armnn::DataType::QuantisedAsymm8:
            return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        case armnn::DataType::Signed32:
            return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        default:
        {
            std::stringstream errString;
            errString << "Unexpected datatype when creating const tensor: "
                      << armnn::GetDataTypeName(tensorInfo.GetDataType())
                      << " shape:" << tensorInfo.GetShape()
                      << CHECK_LOCATION().AsString();
            throw ParseException(errString.str());
        }
    }
}

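// Looks up a subgraph input by tensor name and returns its (binding id, TensorInfo) pair;
// throws a ParseException listing the available input names if no match is found.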
BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                          const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    for (auto const & input : inputs)
    {
        if (input.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
            return std::make_pair(bindingId, ToTensorInfo(input.second));
        }
    }

    std::stringstream bindings;
    for (auto const & input : inputs)
    {
        bindings << "'" << input.second->name << "' ";
    }

    throw ParseException(
        boost::str(
            boost::format("No input binding found for subgraph:%1% and name:%2%. "
                          "Possible inputs are: [%3%] %4%") %
            subgraphId %
            name %
            bindings.str() %
            CHECK_LOCATION().AsString()));
}

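// Looks up a subgraph output by tensor name and returns its (binding id, TensorInfo) pair,
// preferring any recorded overridden output shape over the shape stored in the model;
// throws a ParseException listing the available output names if no match is found.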
BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        auto const output = outputs[i];
        if (output.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
            std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
                                              m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
            return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
        }
    }

    std::stringstream bindings;
    for (auto const & output : outputs)
    {
        bindings << "'" << output.second->name << "' ";
    }

    throw ParseException(
        boost::str(
            boost::format("No output binding found for subgraph:%1% and name:%2%. "
                          "Possible outputs are: [%3%] %4%") %
            subgraphId %
            name %
            bindings.str() %
            CHECK_LOCATION().AsString()));
}

size_t TfLiteParser::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}

std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(inputs.size());
    for (auto const & input : inputs)
    {
        result.push_back(input.second->name);
    }
    return result;
}

std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(outputs.size());
    for (auto const & output : outputs)
    {
        result.push_back(output.second->name);
    }
    return result;
}

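// Factory and teardown for the public parser interface. A minimal usage sketch follows; the
// file path and tensor names are hypothetical, and it assumes the CreateNetworkFromBinaryFile
// member declared on ITfLiteParser:
//
//   ITfLiteParserPtr parser = ITfLiteParser::Create();
//   armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//   BindingPointInfo inputInfo  = parser->GetNetworkInputBindingInfo(0, "input");
//   BindingPointInfo outputInfo = parser->GetNetworkOutputBindingInfo(0, "output");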
ITfLiteParser* ITfLiteParser::CreateRaw()
{
    return new TfLiteParser();
}

ITfLiteParserPtr ITfLiteParser::Create()
{
    return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
}

void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}

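// SupportedDataStorage keeps exactly one of its typed buffers alive for as long as the
// ConstTensor that references it; each constructor below leaves the unused pointers null.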
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int32Data(nullptr)
{
}

TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int32Data(nullptr)
{
}

TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int32Data(std::move(data))
{
}

} // armnnTfLiteParser