//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TfLiteParser.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>

// armnnUtils:
#include <ParserHelper.hpp>
#include <Permute.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/cast.hpp>

#include <fstream>
#include <algorithm>
#include <limits>
#include <numeric>
#include <flatbuffers/flexbuffers.h>

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{
namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParser::ModelPtr & model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid operator index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // The model is not re-checked here because CHECK_MODEL is expected to have run already;
    // an assert is sufficient.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // Likewise, the subgraph index should already have been validated by CHECK_MODEL,
    // so only an assert is added here.
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // The tensor index is the only thing left to check here.
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                tensorIndex %
                location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
                    const CheckLocation & location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with a null tensor pointer. "
                              "at %2%") %
                location.m_Function %
                location.FileLine()));
    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParser::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "buffer:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid buffer index. "
                              "buffer index:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("The buffer #%1% is null. %2%") %
                bufferIndex %
                location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                bufferId %
                location.AsString()));
    }
    else if (tensorInfo.GetNumElements() > bufferPtr->data.size() ||
             tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
266
267
268std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
269{
270 std::vector<unsigned int> result;
271 result.reserve(in.size());
272 for (auto & i : in)
273 {
274 result.push_back(CHECKED_NON_NEGATIVE(i));
275 }
276 return result;
277}
278
279void CalcPadding(uint32_t inputSize,
280 uint32_t filterSize,
281 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100282 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100283 uint32_t& paddingFront,
284 uint32_t& paddingBack,
285 tflite::Padding padding)
286{
287 paddingFront = 0;
288 paddingBack = 0;
289 if (padding == tflite::Padding_SAME)
290 {
291 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100292 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
293 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100294 if (temp > inputSize)
295 {
296 paddingFront = (temp - inputSize) / 2;
297 paddingBack = (temp - inputSize) - paddingFront;
298 }
299 }
300}
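
// For example, with SAME padding and inputSize = 10, filterSize = 3, stride = 2, dilation = 1:
// outputSize = ceil(10 / 2) = 5, dilatedSize = 3 and temp = (5 - 1) * 2 + 3 = 11, so one element
// of padding is needed in total, split as paddingFront = 0 and paddingBack = 1.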

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QuantisedAsymm8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                    tensorPtr->type %
                    tflite::EnumNameTensorType(tensorPtr->type) %
                    tensorPtr->name %
                    location.AsString()));
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
        CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

        if (tensorPtr->quantization->scale.size() == 1)
        {
            quantizationScale = tensorPtr->quantization->scale[0];
        }
        if (tensorPtr->quantization->zero_point.size() == 1)
        {
            // NOTE: we lose precision here when converting from 64 bit to 32,
            // but this is what ArmNN supports at the moment.
            quantizationOffset = static_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
        }
    }

    std::vector<unsigned int> safeShape = shapes;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
    }

    // two statements (on purpose) for easier debugging:
    armnn::TensorInfo result(static_cast<unsigned int>(safeShape.size()),
                             safeShape.data(),
                             type,
                             quantizationScale,
                             quantizationOffset);
    return result;
}
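
// For example, a TensorType_UINT8 tensor with scale 0.0039 and zero_point 128 becomes a
// DataType::QuantisedAsymm8 TensorInfo, where the real value of a quantized element q is
// scale * (q - zero_point).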

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions);
}

template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
                     boost::str(
                         boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
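
// For example, depthwise convolution weights arrive from TfLite as [1, H, W, I*M];
// ParseDepthwiseConv2D below reshapes them to [H, W, I, M] and passes the permutation vector
// { 2, 3, 1, 0 } so that the constant tensor handed to ArmNN ends up laid out as [M, I, H, W].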

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // generate the binding id by shifting the tensor id by 8 bits
    // and adding the subgraph id, which allows 256 subgraphs
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}
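
// For example, tensor 5 in subgraph 0 gets binding id (5 << 8) + 0 = 1280 and the same tensor index
// in subgraph 1 gets 1281: the low 8 bits identify the subgraph and the remaining bits the tensor.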

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

} // <anonymous>

TfLiteParser::TfLiteParser()
: m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]   = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]     = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]           = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]            = &TfLiteParser::ParseDetectionPostProcess;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]   = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]          = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]       = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]           = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]           = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]              = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]             = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]           = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]   = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]     = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]               = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_ADD]               = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]               = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]              = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]               = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]             = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]              = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]            = &TfLiteParser::ParseUnpack;
}

void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}

void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned int> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward(reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
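
// For example, when adding a [1, 2, 2, 3] tensor and a [3] tensor, the lower-rank input is reshaped
// to [1, 1, 1, 3] (its dimensions copied right-aligned, the remaining dimensions filled with 1) and
// connected to input slot 0, while the unmodified higher-rank input feeds input slot 1.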
527
telsoa01c577f2c2018-08-31 09:22:23 +0100528INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
529{
530 ResetParser();
531 m_Model = LoadModelFromFile(graphFile);
532 return CreateNetworkFromModel();
533}
534
535INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
536{
537 ResetParser();
538 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
539 return CreateNetworkFromModel();
540}
541
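// Typical usage of the parser (a sketch only; the binding-info calls are assumed from the public
// ITfLiteParser interface and are not defined in this file):
//   auto parser = ITfLiteParser::Create();
//   armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//   auto inputBinding  = parser->GetNetworkInputBindingInfo(0, "input_name");
//   auto outputBinding = parser->GetNetworkOutputBindingInfo(0, "output_name");
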
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubgraphPtr const & subgraph : m_Model->subgraphs)
    {
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                            builtinCode %
                            tflite::BuiltinOperator_MAX %
                            subgraphIndex %
                            operatorIndex %
                            CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                BOOST_LOG_TRIVIAL(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
            boost::format("Another layer has already registered itself as the producer of "
                          "subgraph:%1% tensor:%2% %3%") %
            subgraphIndex %
            tensorIndex %
            CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    throw ParseException(
        boost::str(
            boost::format("Operator not supported. "
                          "subgraph:%1% operator:%2% "
                          "opcode_index:%3% opcode:%4% / %5% %6%") %
            subgraphIndex %
            operatorIndex %
            opcodeIndex %
            opcode %
            tflite::EnumNameBuiltinOperator(opcode) %
            CHECK_LOCATION().AsString()));
}

void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // ACL only supports a depth (channel) multiplier of {1,2,3}; it is not currently stored in the descriptor
    CHECK_VALID_SIZE(CHECKED_NON_NEGATIVE(options->depth_multiplier), 1, 2, 3);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}

void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }

    armnn::BatchToSpaceNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_Crops = crops;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}

void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();

    SoftmaxDescriptor desc;
    desc.m_Beta = options->beta;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
                                                     const armnn::TensorInfo & inputTensorInfo)
{
    CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
    std::vector<uint32_t> squeezeDims = squeezeDimsIn;
    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        std::stringstream ss;
        ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
           << " shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    if (squeezeDims.empty())
    {
        squeezeDims.assign(dimensionSequence,
                           dimensionSequence+inputTensorInfo.GetNumDimensions());
    }

    std::vector<uint32_t> outputDims;
    for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
    {
        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
        auto currentDimension = inputTensorInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    if (outputDims.size() > 4)
    {
        std::stringstream ss;
        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
           << " input shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    // we need to preserve the tensor type and the quantization data as well
    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);

    return outTensorInfo;
}
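
// For example, squeezing a [1, 2, 1, 3] tensor with squeeze_dims = {0} produces [2, 1, 3], while an
// empty squeeze_dims list removes every dimension of size 1 and produces [2, 3].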

void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo outputTensorInfo =
        TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
                                           inputTensorInfo);

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();

    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001280void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1281{
1282 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1283
1284 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1285 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1286
1287 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1288 CHECK_VALID_SIZE(inputs.size(), 2);
1289
1290 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1291 CHECK_VALID_SIZE(outputs.size(), 1);
1292
1293 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1294 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1295
1296 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1297 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1298
1299 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1300 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1301
1302 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1303 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1304 {
1305 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1306 }
1307 else
1308 {
1309 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1310 }
1311
1312 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1313
1314 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1315 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1316}
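// Note on the elementwise parsers (Sub above, Add and Mul below): when the two inputs have a
// different number of dimensions, e.g. an illustrative {1,2,2,3} tensor combined with a {3}
// tensor, AddBroadcastReshapeLayer is expected to insert a Reshape in front of the lower-rank
// input so that the binary layer can broadcast it against the higher-rank input.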
1317
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001318void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1319{
1320 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1321
1322 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1323 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1324
1325 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1326 CHECK_VALID_SIZE(inputs.size(), 2);
1327
1328 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1329 CHECK_VALID_SIZE(outputs.size(), 1);
1330
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001331 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1332 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1333
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001334 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1335 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1336
1337 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1338 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1339
1340 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001341 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1342 {
1343 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1344 }
1345 else
1346 {
1347 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1348 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001349
1350 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1351
1352 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1353 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1354}
1355
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001356void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1357{
1358 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1359
1360 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1361 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1362
1363 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1364 CHECK_VALID_SIZE(inputs.size(), 2);
1365
1366 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1367 CHECK_VALID_SIZE(outputs.size(), 1);
1368
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001369 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1370 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1371
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001372 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1373 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1374
1375 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1376 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1377
1378 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001379 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1380 {
1381 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1382 }
1383 else
1384 {
1385 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1386 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001387
1388 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1389
1390 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1391 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1392}
1393
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001394void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1395{
1396 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1397
1398 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1399
1400 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1401 CHECK_VALID_SIZE(outputs.size(), 1);
1402
1403 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1404 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1405
1406 armnn::MeanDescriptor desc;
1407 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1408 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1409 desc.m_Axis = axis;
1410
1411 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1412 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1413
1414 desc.m_KeepDims =
1415 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1417
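// Worked example (illustrative shapes): with a {1,10,10,3} input and an axis buffer of {1,2},
// the Mean reduces over H and W; m_KeepDims is true only when the TfLite output keeps the input
// rank (output {1,1,1,3}) and false when the reduced axes are dropped (output {1,3}).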
1418 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1419 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1420
1421 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1422
1423 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1424 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1425
1426 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1427 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1428}
1429
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001430void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1431{
1432 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1433
1434 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1435
1436 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1437 CHECK_VALID_SIZE(outputs.size(), 1);
1438
1439 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1440 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1441
1442 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1443 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1444
1445 size_t step = 2;
1446 armnn::PadDescriptor desc;
1447 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1448 {
1449 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1450 }
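// Worked example (illustrative values): a flattened pad buffer of {0,0, 1,1, 2,2, 0,0} becomes
// the pad list {(0,0), (1,1), (2,2), (0,0)}, so a {1,2,2,1} input would be padded to {1,4,6,1}.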
1451
1452 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1453 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1454
1455 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1456 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1457
1458 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1459 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1460
1461 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1462 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1463}
1464
Finn Williamsc42c3842019-01-22 14:18:11 +00001465
Sadik Armagan58f39192018-09-17 14:14:39 +01001466void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1467{
Finn Williamsc42c3842019-01-22 14:18:11 +00001468 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001469}
1470
1471void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1472{
Finn Williamsc42c3842019-01-22 14:18:11 +00001473 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1474}
Sadik Armagan58f39192018-09-17 14:14:39 +01001475
Finn Williamsc42c3842019-01-22 14:18:11 +00001476void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1477{
1478 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1479}
1480
Nina Drozd99851762019-04-09 09:37:38 +01001481void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1482{
1483 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1484}
1485
Finn Williamsc42c3842019-01-22 14:18:11 +00001486
1487void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1488{
1489 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001490 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1491 boost::ignore_unused(operatorPtr);
1492
1493 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1494 CHECK_VALID_SIZE(inputs.size(), 1);
1495
1496 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1497 CHECK_VALID_SIZE(outputs.size(), 1);
1498
Finn Williamsc42c3842019-01-22 14:18:11 +00001499 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001500 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001501 activationDesc.m_Function = activationType;
1502
1503 switch (activationType)
1504 {
1505 case ActivationFunction::ReLu:
1506 {
1507 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1508 break;
1509 }
1510 case ActivationFunction::BoundedReLu:
1511 {
1512 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1513 activationDesc.m_A = 6.0f;
1514 activationDesc.m_B = 0.0f;
1515 break;
1516 }
1517 case ActivationFunction::Sigmoid:
1518 {
1519 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1520 break;
1521 }
Nina Drozd99851762019-04-09 09:37:38 +01001522 case ActivationFunction::TanH:
1523 {
1524 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1525 activationDesc.m_A = 1.0f;
1526 activationDesc.m_B = 1.0f;
1527 break;
1528 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001529 default:
1530 {
1531 throw ParseException(
1532 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1533 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1534 " %2% ") % static_cast<int>(activationType) % CHECK_LOCATION().AsString()));
1535 }
1536
1537 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001538
1539 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1540 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1541
1542 // register the input connection slots for the layer, connections are made after all layers have been created
1543 // only the tensors for the inputs are relevant, exclude the const tensors
1544 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1545 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1546
1547 // register the output connection slots for the layer, connections are made after all layers have been created
1548 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1549 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1550}
Sadikb94967b2018-09-19 15:30:00 +01001551armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1552 const std::vector<int32_t> & targetDimsIn)
1553{
1554 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1555 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1556
1557 if (stretchDim != targetDimsIn.end())
1558 {
1559 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1560 {
1561 throw ParseException(
1562 boost::str(
1563 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1564 }
1565
1566 auto targetNumElements =
1567 boost::numeric_cast<unsigned int>(
1568 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1569
1570 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1571 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1572 }
1573
1574 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1575
1576 TensorInfo reshapeInfo = inputTensorInfo;
1577 reshapeInfo.SetShape(outputShape);
1578
1579 return reshapeInfo;
1580}
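// Worked example for OutputShapeOfReshape (illustrative values): reshaping a 24-element tensor
// to {-1, 6} gives targetNumElements = (-1) * (-1) * 6 = 6 (the initial -1 cancels the stretch
// marker), so the stretch dimension becomes 24 / 6 = 4 and the resulting shape is {4, 6}.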
1581
1582void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1583{
1584 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1585
1586 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001587
1588 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1589 CHECK_VALID_SIZE(outputs.size(), 1);
1590
1591 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1592 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1593
1594 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001595 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1596 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001597 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1598
kevmay0171972a82018-12-17 14:28:03 +00001599 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001600 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1601 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001602 {
1603 std::stringstream ss;
1604 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001605 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001606 << " does not equal output shape "
1607 << actualOutputTensorInfo.GetShape()
1608 << ": "
1609 << CHECK_LOCATION().AsString();
1610 throw ParseException(ss.str());
1611 }
1612
Sadikb94967b2018-09-19 15:30:00 +01001613 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001614 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001615
1616 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1617 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001618 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001619
1620 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1621 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1622
1623 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1624 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1625}
1626
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001627void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1628{
1629 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1630
1631 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1632 CHECK_VALID_SIZE(inputs.size(), 2);
1633
1634 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1635 CHECK_VALID_SIZE(outputs.size(), 1);
1636
1637 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1638
1639 // Data for the parsed tensor args (size) must be stored locally.
1640 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1641
1642 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1643 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1644
1645 ResizeBilinearDescriptor desc;
1646 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
1647 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1648 desc.m_DataLayout = armnn::DataLayout::NHWC;
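// The TfLite size input holds {new_height, new_width}; e.g. an illustrative buffer of {224, 224}
// maps to m_TargetHeight = 224 and m_TargetWidth = 224 above.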
1649
1650 auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
1651 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
1652
1653 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1654 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1655
1656 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1657 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1658
1659 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1660 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1661}
1662
Sadik Armagan479045b2018-10-01 11:51:37 +01001663void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
1664{
1665 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1666
1667 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1668 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
1669
1670 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1671
1672 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1673 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1674 CHECK_VALID_SIZE(outputs.size(), 1);
1675
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001676 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
1677 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01001678
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001679 const unsigned int concatDimInput = static_cast<unsigned int>(
1680 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01001681
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001682 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
1683 concatDescriptor.SetConcatAxis(concatDimInput);
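// Worked example (illustrative values): for rank-4 inputs with options->axis == -1 the expression
// above gives concatDimInput = (4 + (-1)) % 4 = 3, i.e. concatenation along the innermost
// (channel) dimension of NHWC tensors.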
Sadik Armagan479045b2018-10-01 11:51:37 +01001684
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001685 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01001686
1687 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
1688 {
1689 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
1690
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001691 // This sets up the concatDescriptor view origin
1692 armnnUtils::ProcessConcatInputTensorInfo(
1693 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01001694 }
1695
1696 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01001697 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01001698
1699 BOOST_ASSERT(layer != nullptr);
1700
1701 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1702 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01001703
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001704 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01001705
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001706 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01001707
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00001708 // add fused activation layer
1709 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01001710
Sadik Armagan479045b2018-10-01 11:51:37 +01001711 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1712 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1713}
1714
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001715void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
1716{
1717 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1718
1719 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1720 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
1721
1722 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1723
1724 FullyConnectedDescriptor desc;
1725 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01001726 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001727
1728 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1729 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1730 CHECK_VALID_SIZE(outputs.size(), 1);
1731
1732 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1733
1734 // Fully Connected Layer accepts two dimensional weights input
1735 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
1736 if (weightsDimension != 2)
1737 {
1738 throw ParseException(
1739 boost::str(
1740 boost::format(
1741 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
1742 "Node %2%")
1743 % weightsDimension
1744 % CHECK_LOCATION().AsString()));
1745 }
1746
Matteo Martincigh747ef822018-12-18 09:26:39 +00001747 auto filterTensorAndData = CreateConstTensor(inputs[1],
1748 filterTensorInfo,
1749 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001750 armnn::IConnectableLayer* layer;
1751 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
1752
1753 if (inputs.size() == 3)
1754 {
1755 desc.m_BiasEnabled = true;
1756 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00001757 auto biasTensorAndData = CreateConstTensor(inputs[2],
1758 biasTensorInfo,
1759 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001760 layer = m_Network->AddFullyConnectedLayer(desc,
1761 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001762 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001763 layerName.c_str());
1764 }
1765 else
1766 {
1767 layer = m_Network->AddFullyConnectedLayer(desc,
1768 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01001769 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001770 layerName.c_str());
1771 }
1772 BOOST_ASSERT(layer != nullptr);
1773
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001774 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1775
1776 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1777
1778 if (inputTensorInfo.GetNumDimensions() > 2)
1779 {
1780 // Add reshape to flatten to 2D [batch_size, input_size],
1781 // where "input_size" corresponds to the number of inputs to the layer,
1782 // matching the second dimension of weights,
1783 // and "batch_size" is calculated by dividing the number of elements by "input_size".
1784 std::vector<unsigned int> reshapedDimensions(2);
1785 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
1786 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
1787
1788 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
1789 {
1790 throw ParseException(
1791 boost::str(
1792 boost::format(
1793 "Failed to deduce input tensor shape from filter size %1% %2%")
1794 % reshapedDimensions[1]
1795 % CHECK_LOCATION().AsString()));
1796 }
1797
1798 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
1799 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
1800
1801 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
1802 armnn::ReshapeDescriptor reshapeDescriptor;
1803 reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
1804 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, reshapeLayerName.c_str());
1805
1806 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
1807 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
1808
1809 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
1810 }
1811 else
1812 {
1813 // register the input connection slot for the layer
1814 // only the tensors for the inputs are relevant, exclude the const tensors
1815 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1816 }
1817
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001818 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1819 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1820
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001821 // we need to add the activation layer and fortunately we don't need to care about the data layout
1822 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
1823 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01001824
Sadik Armagan8853c1f2018-10-22 09:04:18 +01001825 // register the output connection slots for the layer, connections are made after all layers have been created
1826 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1827 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
1828}
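// Worked example for the FullyConnected reshape above (illustrative shapes): with an input of
// {1,2,2,32} (128 elements) and a weights tensor of {16,128}, reshapedDimensions becomes
// {1,128}, so the inserted Reshape layer flattens the input to [batch_size, input_size] before
// it is connected to the FullyConnected layer.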
1829
keidav011b3e2ea2019-02-21 10:07:37 +00001830void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
1831{
1832 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1833
1834 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1835
1836 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1837 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1838 CHECK_VALID_SIZE(outputs.size(), 4);
1839
1840 // Obtain custom options from flexbuffers
1841 auto custom_options = operatorPtr->custom_options;
1842 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
1843
1844 // Obtain descriptor information from tf lite
1845 DetectionPostProcessDescriptor desc;
1846 desc.m_MaxDetections = m["max_detections"].AsUInt32();
1847 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
1848 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
1849 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
1850 desc.m_NumClasses = m["num_classes"].AsUInt32();
1851 desc.m_ScaleH = m["h_scale"].AsFloat();
1852 desc.m_ScaleW = m["w_scale"].AsFloat();
1853 desc.m_ScaleX = m["x_scale"].AsFloat();
1854 desc.m_ScaleY = m["y_scale"].AsFloat();
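// The keys read above ("max_detections", "nms_score_threshold", ...) are taken from the
// flexbuffer-encoded custom options attached to the TfLite detection post-process custom operator.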
1855
keidav0107d58c72019-02-26 11:57:39 +00001856 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00001857 {
keidav0107d58c72019-02-26 11:57:39 +00001858 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00001859 }
1860 if (!(m["detections_per_class"].IsNull()))
1861 {
1862 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
1863 }
1864
1865 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
1866 {
1867 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
1868 "must be positive and less than or equal to 1.");
1869 }
1870
1871 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
1872 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
1873 armnn::Optional<armnn::PermutationVector&>());
1874
1875 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
1876 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
1877 layerName.c_str());
1878
1879 BOOST_ASSERT(layer != nullptr);
1880
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00001881 // The model does not specify the output shapes.
1882 // The output shapes are calculated from the max_detection and max_classes_per_detection.
1883 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
1884 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
1885 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
1886 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
1887 m_OverridenOutputShapes.push_back({ 1 });
1888
keidav011b3e2ea2019-02-21 10:07:37 +00001889 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
1890 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00001891 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00001892 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
1893 }
1894
1895 // Register the input connection slots for the layer, connections are made after all layers have been created
1896 // only the tensors for the inputs are relevant, exclude the const tensors
1897 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1898 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1899
1900 // Register the output connection slots for the layer, connections are made after all layers have been created
1901 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1902 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
1903 outputTensorIndexes[1],
1904 outputTensorIndexes[2],
1905 outputTensorIndexes[3]});
1906}
1907
Nina Drozd200e3802019-04-15 09:47:39 +01001908void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
1909{
1910 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1911
1912 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1913 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
1914
1915 // unpackAxis indicates the axis along which the input is unpacked
1916 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
1917
1918 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1919 CHECK_VALID_SIZE(inputs.size(), 1);
1920
1921 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001922
1923 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
1924 {
1925 throw ParseException(
1926 boost::str(
1927 boost::format(
1928 "The unpack axis: %1% cannot be greater than or equal to "
1929 "the number of input dimensions %2% %3%")
1930 % unpackAxis
1931 % inputTensorInfo.GetNumDimensions()
1932 % CHECK_LOCATION().AsString()));
1933 }
1934
Nina Drozd200e3802019-04-15 09:47:39 +01001935 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
1936 // If num is not defined, automatically infer from the length of the dimension axis.
1937 if(unpackNum == 0)
1938 {
1939 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
1940 }
1941
1942 // If unpack number cannot be inferred and is still zero, throw ParseException.
1943 if(unpackNum == 0)
1944 {
1945 throw ParseException("Number to unpack must be greater than zero.");
1946 }
1947
1948 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1949 CHECK_VALID_SIZE(outputs.size(), unpackNum);
1950
1951 auto inputDimSize = inputTensorInfo.GetNumDimensions();
1952 std::vector<unsigned int> unpackDimSizes(inputDimSize);
1953
1954 // Add current input shape to unpackDimSizes
1955 for (unsigned int i = 0; i < inputDimSize; ++i)
1956 {
1957 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
1958 }
1959
1960 if (unpackDimSizes[unpackAxis] != unpackNum)
1961 {
1962 throw ParseException("Number to unpack must be the same as the length of the dimension to "
1963 "unpack along.");
1964 }
1965
1966 unpackDimSizes[unpackAxis] /= unpackNum;
1967
1968 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
1969 for (unsigned int j = 0; j < unpackNum; ++j)
1970 {
1971 // Set the size of the views.
1972 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
1973 {
1974 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
1975 }
1976 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
1977 }
1978
1979 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
1980 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
1981
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001982 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
1983 unpackDimSizes.data());
1984
Nina Drozd200e3802019-04-15 09:47:39 +01001985 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1986 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1987
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001988 // Reshape to remove unpacked dimension
1989 unsigned int reshapedNumDimensions = inputDimSize - 1;
1990 std::vector<unsigned int> reshapedDimensions(reshapedNumDimensions);
Nina Drozd200e3802019-04-15 09:47:39 +01001991
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001992 unsigned int reshapeIndex = 0;
1993 for (unsigned int i = 0; i < inputDimSize; ++i)
Nina Drozd200e3802019-04-15 09:47:39 +01001994 {
Narumol Prangnawarat672de572019-04-23 15:28:06 +01001995 if (i == unpackAxis)
1996 {
1997 continue;
1998 }
1999 reshapedDimensions[reshapeIndex++] = unpackDimSizes[i];
Nina Drozd200e3802019-04-15 09:47:39 +01002000 }
2001
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002002 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2003 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2004 {
2005 armnn::TensorInfo reshapedTensorInfo = inputTensorInfo;
2006 reshapedTensorInfo.SetShape(armnn::TensorShape{ reshapedNumDimensions, reshapedDimensions.data() });
2007
2008 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2009 armnn::ReshapeDescriptor reshapeDescriptor;
2010 reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
2011 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, reshapeLayerName.c_str());
2012
2013 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape, inputTensorInfo.GetDataType()));
2014 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2015
2016 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2017
2018 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2019 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2020 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2021 }
Nina Drozd200e3802019-04-15 09:47:39 +01002022}
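// Worked example for ParseUnpack (illustrative shapes): unpacking a {4,3,2} input along axis 1
// with num = 3 first splits it into three {4,1,2} views and then reshapes each view to {4,2},
// matching the shapes of the corresponding TfLite UNPACK outputs.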
2023
Nina Drozd0324f482019-04-08 10:52:10 +01002024void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2025{
2026 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2027
2028 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2029 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2030
2031 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2032
Nina Drozd200e3802019-04-15 09:47:39 +01002033 // If number of splits cannot be inferred and is zero, throw ParseException.
2034 if(numSplits == 0)
2035 {
2036 throw ParseException("Number of splits must be greater than zero.");
2037 }
2038
Nina Drozd0324f482019-04-08 10:52:10 +01002039 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2040 CHECK_VALID_SIZE(inputs.size(), 2);
2041 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2042 CHECK_VALID_SIZE(outputs.size(), numSplits);
2043
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002044 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2045 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002046
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002047 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2048 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2049 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2050
2051 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2052 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002053
2054 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2055 if (splitDim == 0 || splitDim == 2)
2056 {
2057 throw ParseException(
2058 boost::str(
2059 boost::format(
2060 "Dimension %1% for split is not supported by Armnn. %2%")
2061 % splitDim
2062 % CHECK_LOCATION().AsString()));
2063 }
2064
2065 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002066 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002067 {
2068 throw ParseException(
2069 boost::str(
2070 boost::format(
2071 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002072 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002073 % inputTensorInfo.GetNumDimensions()
2074 % MaxNumOfTensorDimensions
2075 % CHECK_LOCATION().AsString()));
2076 }
2077
2078 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2079
2080 // Add current input shape to splitterDimSizes
2081 for (unsigned int i = 0; i < inputDimSize; ++i)
2082 {
2083 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2084 }
2085
2086 if (splitterDimSizes[splitDim] % numSplits != 0)
2087 {
2088 throw ParseException("Number of splits must evenly divide the dimension being split.");
2089 }
2090 splitterDimSizes[splitDim] /= numSplits;
2091
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002092 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002093 for (unsigned int j = 0; j < numSplits; ++j)
2094 {
2095 // Set the size of the views.
2096 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2097 {
2098 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2099 }
2100 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2101 }
2102
2103 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2104 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2105
2106 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002107 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002108
2109 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2110 splitterDimSizes.data());
2111
2112 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2113 {
2114 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
2115 inputTensorInfo.GetDataType()));
2116 }
2117
2118 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2119 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2120}
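// Worked example for ParseSplit (illustrative shapes): splitting a {1,8,8,6} input into
// num_splits = 3 along axis 3 produces three output views, each registered with shape {1,8,8,2}.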
2121
Sadik Armagan58f39192018-09-17 14:14:39 +01002122armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2123 unsigned int outputSlot,
2124 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002125{
2126 ActivationDescriptor activationDesc;
2127 std::string layerName = prevLayer->GetName();
2128
2129 switch(activationType)
2130 {
2131 case tflite::ActivationFunctionType_NONE:
2132 {
2133 // this is a no-op: return previous layer
2134 return prevLayer;
2135 }
2136 case tflite::ActivationFunctionType_RELU:
2137 {
2138 activationDesc.m_Function = ActivationFunction::ReLu;
2139 layerName += ":RELU";
2140 break;
2141 }
2142 case tflite::ActivationFunctionType_RELU6:
2143 {
2144 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2145 activationDesc.m_A = 6.0f;
2146 activationDesc.m_B = 0.0f;
2147 layerName += ":RELU6";
2148 break;
2149 }
2150 case tflite::ActivationFunctionType_TANH:
2151 {
2152 activationDesc.m_Function = ActivationFunction::TanH;
2153 activationDesc.m_A = 1.0f;
2154 activationDesc.m_B = 1.0f;
2155 layerName += ":TANH";
2156 break;
2157 }
2158
2159 // These are listed here only as a reminder of what else we could support
2160 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2161 case tflite::ActivationFunctionType_SIGN_BIT:
2162 default:
2163 {
2164 throw ParseException(
2165 boost::str(
2166 boost::format("TfLite parser doesn't support fused activation: "
2167 "%1%/%2% %3% ") %
2168 activationType %
2169 tflite::EnumNameActivationFunctionType(activationType) %
2170 CHECK_LOCATION().AsString()));
2171
2172 }
2173 }
2174
2175 IConnectableLayer* activationLayer =
2176 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2177
2178 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2179 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2180 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2181 return activationLayer;
2182}
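// Note: callers register their output slots against the layer returned here, so when a fused
// activation is present the activation layer (rather than the preceding arithmetic/concat layer)
// becomes the producer of the operator's output tensor.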
2183
2184TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2185{
2186 if (fileName == nullptr)
2187 {
2188 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2189 CHECK_LOCATION().AsString()));
2190 }
2191 boost::system::error_code errorCode;
2192 boost::filesystem::path pathToFile(fileName);
2193 if (!boost::filesystem::exists(pathToFile, errorCode))
2194 {
2195 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2196 fileName %
2197 errorCode %
2198 CHECK_LOCATION().AsString()));
2199 }
2200 std::ifstream file(fileName, std::ios::binary);
2201 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2202 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2203 fileContent.size());
2204}
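// example usage (sketch; the file path is illustrative):
// TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile("model.tflite");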
2205
2206TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2207{
2208 if (binaryContent == nullptr)
2209 {
2210 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2211 CHECK_LOCATION().AsString()));
2212 }
2213 flatbuffers::Verifier verifier(binaryContent, len);
2214 if (verifier.VerifyBuffer<tflite::Model>() == false)
2215 {
2216 throw ParseException(
2217 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2218 "flatbuffers format. size:%1% %2%") %
2219 len %
2220 CHECK_LOCATION().AsString()));
2221 }
2222 return tflite::UnPackModel(binaryContent);
2223}
2224
2225TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2226 size_t subgraphIndex,
2227 size_t operatorIndex)
2228{
2229 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2230
Derek Lambertiff05cc52019-04-26 13:05:17 +01002231 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2232 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002233
2234 size_t inputCount = operatorPtr->inputs.size();
2235 TensorRawPtrVector result(inputCount);
2236 for (size_t i=0; i<inputCount; ++i)
2237 {
2238 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002239 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002240 }
2241 return result;
2242}
2243
2244TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2245 size_t subgraphIndex,
2246 size_t operatorIndex)
2247{
2248 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2249
Derek Lambertiff05cc52019-04-26 13:05:17 +01002250 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2251 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002252
2253 size_t outputCount = operatorPtr->outputs.size();
2254 TensorRawPtrVector result(outputCount);
2255 for (size_t i=0; i<outputCount; ++i)
2256 {
2257 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2258 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002259 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002260 }
2261 return result;
2262}
2263
2264TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2265 size_t subgraphIndex)
2266{
2267 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002268 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002269
Derek Lambertiff05cc52019-04-26 13:05:17 +01002270 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002271 TensorIdRawPtrVector result(inputCount);
2272 for (size_t i=0; i<inputCount; ++i)
2273 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002274 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002275 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002276 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002277 }
2278 return result;
2279}
2280
2281TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2282 size_t subgraphIndex)
2283{
2284 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002285 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002286
Derek Lambertiff05cc52019-04-26 13:05:17 +01002287 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002288 TensorIdRawPtrVector result(outputCount);
2289 for (size_t i=0; i<outputCount; ++i)
2290 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002291 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2292 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002293 }
2294 return result;
2295}
2296
2297std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2298 size_t subgraphIndex,
2299 size_t operatorIndex)
2300{
2301 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002302 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2303 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002304 return operatorPtr->inputs;
2305}
2306
2307std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2308 size_t subgraphIndex,
2309 size_t operatorIndex)
2310{
2311 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002312 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2313 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002314 return operatorPtr->outputs;
2315}
2316
2317void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2318 size_t operatorIndex,
2319 IConnectableLayer* layer,
2320 const std::vector<unsigned int>& tensorIndexes)
2321{
2322 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2323 BOOST_ASSERT(layer != nullptr);
2324 if (tensorIndexes.size() != layer->GetNumInputSlots())
2325 {
2326 throw ParseException(
2327 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2328 " for subgraph:%3% operator index:%4% %5%") %
2329 tensorIndexes.size() %
2330 layer->GetNumInputSlots() %
2331 subgraphIndex %
2332 operatorIndex %
2333 CHECK_LOCATION().AsString()));
2334 }
2335
2336 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2337 {
2338 unsigned int tensorIndex = tensorIndexes[slotIndex];
2339 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2340 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2341 }
2342}
2343
2344void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2345 size_t operatorIndex,
2346 IConnectableLayer* layer,
2347 const std::vector<unsigned int>& tensorIndexes)
2348{
2349 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2350 BOOST_ASSERT(layer != nullptr);
2351 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2352 {
2353 throw ParseException(
2354 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2355 " for subgraph:%3% operator index:%4% %5%") %
2356 tensorIndexes.size() %
2357 layer->GetNumOutputSlots() %
2358 subgraphIndex %
2359 operatorIndex %
2360 CHECK_LOCATION().AsString()));
2361 }
2362
2363 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2364 {
2365 unsigned int tensorIndex = tensorIndexes[slotIndex];
2366 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2367 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2368 }
2369}
2370
2371void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2372{
2373 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2374
2375 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2376 for (auto const & tensorIdAndPtr : inputs)
2377 {
2378 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2379 IConnectableLayer* layer =
2380 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2381
2382 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2383 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2384
2385 RegisterOutputSlots(subgraphIndex,
2386 VIRTUAL_OPERATOR_ID,
2387 layer,
2388 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2389 }
2390}
2391
2392void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2393{
2394 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2395
2396 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2397 for (auto const & tensorIdAndPtr : outputs)
2398 {
2399 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2400 IConnectableLayer* layer =
2401 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2402
2403 RegisterInputSlots(subgraphIndex,
2404 VIRTUAL_OPERATOR_ID,
2405 layer,
2406 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2407 }
2408}
2409
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002410void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2411{
2412 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2413
Derek Lambertiff05cc52019-04-26 13:05:17 +01002414 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002415 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2416 {
2417 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2418 {
2419 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2420 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2421 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002422 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002423 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2424 auto tensorAndData = CreateConstTensor(tensorPtr,
2425 tensorInfo,
2426 armnn::Optional<armnn::PermutationVector&>());
2427
2428 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2429 IConnectableLayer *layer =
2430 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2431
2432 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2433 RegisterOutputSlots(subgraphIndex,
2434 VIRTUAL_OPERATOR_ID,
2435 layer,
2436 { tensorIndex });
2437
2438 }
2439 }
2440 }
2441}
2442
telsoa01c577f2c2018-08-31 09:22:23 +01002443// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2444TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2445{
2446 CHECK_BUFFER(model, bufferIndex);
2447 return model->buffers[bufferIndex].get();
2448}
2449
Matteo Martincigh747ef822018-12-18 09:26:39 +00002450template<typename T>
2451std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2452TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2453 TfLiteParser::TensorRawPtr tensorPtr,
2454 armnn::TensorInfo& tensorInfo,
2455 armnn::Optional<armnn::PermutationVector&> permutationVector)
2456{
2457 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2458 tensorPtr,
2459 tensorInfo,
2460 permutationVector);
2461 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2462 return std::make_pair(constData.first, std::move(storage));
2463}
2464
telsoa01c577f2c2018-08-31 09:22:23 +01002465std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2466TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002467 armnn::TensorInfo& tensorInfo,
2468 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002469{
2470 CHECK_TENSOR_PTR(tensorPtr);
2471 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2472 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2473
2474 switch (tensorInfo.GetDataType())
2475 {
2476 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002477 return CreateConstTensorAndStoreData<float>(bufferPtr,
2478 tensorPtr,
2479 tensorInfo,
2480 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002481 case armnn::DataType::QuantisedAsymm8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002482 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2483 tensorPtr,
2484 tensorInfo,
2485 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002486 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002487 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2488 tensorPtr,
2489 tensorInfo,
2490 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002491 default:
2492 {
2493 std::stringstream errString;
2494 errString << "Unexpected datatype when creating const tensor: "
2495 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2496 << " shape:" << tensorInfo.GetShape()
2497 << CHECK_LOCATION().AsString();
2498 throw ParseException(errString.str());
2499 }
2500 }
2501}
2502
2503BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2504 const std::string& name) const
2505{
2506 CHECK_SUBGRAPH(m_Model, subgraphId);
2507 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2508 for (auto const & input : inputs)
2509 {
2510 if (input.second->name == name)
2511 {
2512 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2513 return std::make_pair(bindingId, ToTensorInfo(input.second));
2514 }
2515 }
2516
2517 std::stringstream bindings;
2518 for (auto const & input : inputs)
2519 {
2520 bindings << "'" << input.second->name << "' ";
2521 }
2522
2523 throw ParseException(
2524 boost::str(
2525 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2526 "Possible inputs are: [%3%] %4%") %
2527 subgraphId %
2528 name %
2529 bindings.str() %
2530 CHECK_LOCATION().AsString()));
2531}
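// example usage (sketch; the subgraph id and tensor name are illustrative):
// BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, "input");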
2532
2533BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2534 const std::string& name) const
2535{
2536 CHECK_SUBGRAPH(m_Model, subgraphId);
2537 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002538 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002539 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002540 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002541 if (output.second->name == name)
2542 {
2543 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002544 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2545 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2546 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002547 }
2548 }
2549
2550 std::stringstream bindings;
2551 for (auto const & output : outputs)
2552 {
2553 bindings << "'" << output.second->name << "' ";
2554 }
2555
2556 throw ParseException(
2557 boost::str(
2558 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2559 "Possible outputs are: [%3%] %4%") %
2560 subgraphId %
2561 name %
2562 bindings.str() %
2563 CHECK_LOCATION().AsString()));
2564}
2565
2566size_t TfLiteParser::GetSubgraphCount() const
2567{
2568 return m_Model->subgraphs.size();
2569}
2570
2571std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2572{
2573 CHECK_SUBGRAPH(m_Model, subgraphId);
2574 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2575 std::vector<std::string> result;
2576 result.reserve(inputs.size());
2577 for (auto const & input : inputs)
2578 {
2579 result.push_back(input.second->name);
2580 }
2581 return result;
2582}
2583
2584std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2585{
2586 CHECK_SUBGRAPH(m_Model, subgraphId);
2587 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2588 std::vector<std::string> result;
2589 result.reserve(outputs.size());
2590 for (auto const & output : outputs)
2591 {
2592 result.push_back(output.second->name);
2593 }
2594 return result;
2595}
2596
2597ITfLiteParser* ITfLiteParser::CreateRaw()
2598{
2599 return new TfLiteParser();
2600}
2601
2602ITfLiteParserPtr ITfLiteParser::Create()
2603{
2604 return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
2605}
2606
2607void ITfLiteParser::Destroy(ITfLiteParser* parser)
2608{
2609 delete parser;
2610}
2611
2612TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
2613: m_FloatData(std::move(data))
2614, m_Uint8Data(nullptr)
2615, m_Int32Data(nullptr)
2616{
2617}
2618
2619TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
2620: m_FloatData(nullptr)
2621, m_Uint8Data(std::move(data))
2622, m_Int32Data(nullptr)
2623{
2624}
2625
2626TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
2627: m_FloatData(nullptr)
2628, m_Uint8Data(nullptr)
2629, m_Int32Data(std::move(data))
2630{
2631}
2632
2633} // armnnTfLiteParser