//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TfLiteParser.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>

// armnnUtils:
#include <armnnUtils/Permute.hpp>

#include <ParserHelper.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <flatbuffers/flexbuffers.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>

#include <fstream>
#include <algorithm>
#include <limits>
#include <numeric>

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{
namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model has not yet been loaded and unpacked. "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParser::ModelPtr & model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model has not yet been loaded and unpacked. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid operator index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // not checking the model here, because CHECK_MODEL is assumed to have
    // already run and checked it. An assert is enough.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // the subgraph index should likewise have been checked by CHECK_MODEL,
    // so only an assert is added here
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // the tensor index is the only one to check here
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                tensorIndex %
                location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
                    const CheckLocation & location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with a null tensor pointer. "
                              "at %2%") %
                location.m_Function %
                location.FileLine()));

    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParser::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model has not yet been loaded and unpacked. "
                              "buffer:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid buffer index. "
                              "buffer index:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("The buffer #%1% is null. %2%") %
                bufferIndex %
                location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                bufferId %
                location.AsString()));
    }
    else if (tensorInfo.GetNumElements() > bufferPtr->data.size() ||
             tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
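
// Worked example of the size check above (illustrative values, not taken from
// any particular model): a Float32 tensor of shape [1, 2, 2, 1] has 4 elements
// and 4 * sizeof(float) = 16 bytes, so its backing buffer must hold at least
// 16 bytes; a 12-byte buffer would trigger the ParseException in CheckBufferSize.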

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)


std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto & i : in)
    {
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}

void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}
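
// Worked example of the SAME-padding arithmetic above (illustrative numbers,
// not from any specific model): inputSize=5, filterSize=3, stride=2,
// dilation=1 gives outputSize=(5+2-1)/2=3, dilatedSize=3 and
// temp=(3-1)*2+3=7; since 7 > 5, paddingFront=(7-5)/2=1 and paddingBack=1.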

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QAsymmU8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT8:
            type = armnn::DataType::QSymmS8;
            break;
        case tflite::TensorType_INT16:
            type = armnn::DataType::QSymmS16;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                    tensorPtr->type %
                    tflite::EnumNameTensorType(tensorPtr->type) %
                    tensorPtr->name %
                    location.AsString()));
        }
    }
    std::vector<unsigned int> safeShape = shapes;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        if (tensorPtr->quantization->scale.size() <= 1)
        {
            CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
            CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

            if (tensorPtr->quantization->scale.size() == 1)
            {
                quantizationScale = tensorPtr->quantization->scale[0];
            }
            if (tensorPtr->quantization->zero_point.size() == 1)
            {
                // NOTE: we lose precision here when converting from 64 bit to 32
                // but this is what we support at the moment in ArmNN
                quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
            }

            armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
                                     safeShape.data(),
                                     type,
                                     quantizationScale,
                                     quantizationOffset);

            return result;
        }
        else
        {
            std::vector<float> quantizationScales;
            std::vector<int32_t> quantizationOffsets;

            // Scale
            std::copy(tensorPtr->quantization->scale.begin(),
                      tensorPtr->quantization->scale.end(),
                      std::back_inserter(quantizationScales));

            // QSymm Per-axis
            armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
                                     safeShape.data(),
                                     type,
                                     quantizationScales,
                                     boost::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));

            return result;
        }
    }
    else
    {
        armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
                                 safeShape.data(),
                                 type,
                                 quantizationScale,
                                 quantizationOffset);
        return result;
    }
}

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr)
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions);
}
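
// Illustrative sketch of what ToTensorInfo produces (the values are made up,
// not from a real model): a UINT8 tensor of shape [1, 224, 224, 3] with a
// single scale 0.5 and zero point 128 becomes a QAsymmU8 TensorInfo with
// quantizationScale 0.5 and quantizationOffset 128, while an INT8 weight
// tensor carrying one scale per output channel is mapped to a per-axis
// quantized TensorInfo built from the scales vector and the tensor's
// quantized_dimension.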

template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    boost::ignore_unused(tensorPtr);
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // generate the binding id by shifting the tensor id left by 8 bits
    // and adding the subgraph id, which allows up to 256 subgraphs
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}
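
// Worked example of the binding id scheme above (hypothetical indices):
// subgraph 0, tensor 5 -> (5 << 8) + 0 = 1280; subgraph 1, tensor 5 ->
// (5 << 8) + 1 = 1281. The low 8 bits therefore carry the subgraph index and
// the remaining bits carry the tensor index.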

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

} // <anonymous>

TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
}

void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}

void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
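
// Illustrative shape example for the broadcast reshape above (made-up shapes):
// for an elementwise op with inputs of shape [1, 2, 3, 4] and [3, 4], the
// smaller input is reshaped to [1, 1, 3, 4] (the trailing dimensions are kept
// and leading 1s are prepended), so both inputs have the same rank before the
// layers are connected.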

INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
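
// Typical caller-side use of the two entry points above, as a minimal sketch
// ("model.tflite" is a placeholder path and error handling is omitted):
//
//     armnnTfLiteParser::ITfLiteParserPtr parser =
//         armnnTfLiteParser::ITfLiteParser::Create();
//     armnn::INetworkPtr network =
//         parser->CreateNetworkFromBinaryFile("model.tflite");
//
// The returned INetworkPtr is then optimized and loaded into an armnn::IRuntime.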

INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("The TfLite parser currently supports only 1 subgraph. This model has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubgraphPtr const & subgraph : m_Model->subgraphs)
    {
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                            builtinCode %
                            tflite::BuiltinOperator_MAX %
                            subgraphIndex %
                            operatorIndex %
                            CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                ARMNN_LOG(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
            boost::format("Another layer has already registered itself as the producer of "
                          "subgraph:%1% tensor:%2% %3%") %
            subgraphIndex %
            tensorIndex %
            CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    // NOTE: By default we presume the custom operator is not supported
    auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;

    // Identify custom code defined for custom operator
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;

    // Find parser function that corresponds to custom code (if any)
    auto iterator = m_CustomParserFunctions.find(customCode);
    if (iterator != m_CustomParserFunctions.end())
    {
        customParserFunction = iterator->second;
    }

    // Run parser function
    (this->*customParserFunction)(subgraphIndex, operatorIndex);
}
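
// Dispatch example for the custom-operator path above: a model containing a
// CUSTOM operator with custom_code "TFLite_Detection_PostProcess" is routed to
// ParseDetectionPostProcess (registered in the constructor), while any other
// custom_code falls back to ParseUnsupportedOperator, which either throws or
// inserts a StandInLayer depending on m_StandInLayerForUnsupported.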

void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
            boost::str(
                boost::format("Operator not supported. "
                              "subgraph:%1% operator:%2% "
                              "opcode_index:%3% opcode:%4% / %5% %6%") %
                subgraphIndex %
                operatorIndex %
                opcodeIndex %
                opcode %
                tflite::EnumNameBuiltinOperator(opcode) %
                CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());

    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
    }

    auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}

void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
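
// Layout example for the Conv2D parsing above (illustrative shapes): a 3x3
// convolution producing 16 channels from an 8-channel NHWC input of shape
// [1, 32, 32, 8] has a TfLite filter tensor of shape [16, 3, 3, 8] (OHWI), so
// filterHeight and filterWidth are read from dimensions 1 and 2, matching the
// way inputHeight and inputWidth are read from the NHWC input.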

void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
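
// Weight-layout example for the depthwise parsing above (illustrative shapes):
// with an NHWC input of shape [1, 64, 64, 8] and depth_multiplier 2, TfLite
// stores the weights as [1, 3, 3, 16] (i.e. [1, H, W, I * M]); the code above
// reshapes them to [3, 3, 8, 2] ([H, W, I, M]) and then permutes with
// {2, 3, 1, 0} so the constant tensor handed to ArmNN ends up as [2, 8, 3, 3]
// ([M, I, H, W]).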

void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);

    IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
    BOOST_ASSERT(layer != nullptr);

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}

void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1, 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);

    PermuteDescriptor desc;

    if (inputs.size() == 2)
    {
        armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
        BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
        auto numPermVecElements = permuteTensorInfo.GetNumElements();
        std::vector<unsigned int> permuteShape(numPermVecElements);
        ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());

        // permuteShape holds a TensorFlow/NumPy-style permute vector; translate it to the form ArmNN expects
        // by finding the permutation that inverts the TF vector (e.g. 3,0,1,2 -> 1,2,3,0)
        std::vector<unsigned int> armnnPermuteShape(numPermVecElements);
        std::vector<unsigned int>::iterator it;
        for (unsigned int i = 0u; i < numPermVecElements; ++i)
        {
            it = std::find(permuteShape.begin(), permuteShape.end(), i);
            armnnPermuteShape[i] = static_cast<unsigned int>(std::distance(permuteShape.begin(), it));
        }

        PermutationVector permutationVector(armnnPermuteShape.data(), permuteTensorInfo.GetNumElements());

        desc = PermuteDescriptor(permutationVector);
    }

    layer = m_Network->AddPermuteLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);

    layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                      filterTensorAndData.first,
                                                      EmptyOptional(),
                                                      layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}

void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());

    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }

    armnn::BatchToSpaceNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_Crops = crops;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
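
// Illustrative decoding of the crops buffer above (made-up values): with
// blockShape [2, 2] and cropsVector [0, 0, 2, 2], the loop produces the pairs
// {(0, 0), (2, 2)}, i.e. no cropping on the height axis and two elements
// cropped from each end of the width axis.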

void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}

void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001263void TfLiteParser::ParsePool(size_t subgraphIndex,
1264 size_t operatorIndex,
1265 PoolingAlgorithm algorithm)
1266{
1267 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1268
1269 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1270 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1271
1272 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1273
1274 std::string layerName;
1275
1276 switch (algorithm)
1277 {
1278 case PoolingAlgorithm::Average:
1279 layerName =
1280 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1281 break;
1282 case PoolingAlgorithm::Max:
1283 layerName =
1284 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1285 break;
1286 default:
1287 BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
1288 }
1289
1290 Pooling2dDescriptor desc;
1291
1292 desc.m_PoolType = algorithm;
1293 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1294 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1295 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1296 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1297 desc.m_PaddingMethod = PaddingMethod::Exclude;
1298 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001299 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001300
1301 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1302 CHECK_VALID_SIZE(inputs.size(), 1);
1303 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1304
1305 // assuming input is NHWC
1306 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1307 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1308
Pablo Tellof0bd6832019-04-26 17:58:13 +01001309 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1310 desc.m_PadTop, desc.m_PadBottom, options->padding);
1311 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1312 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001313
1314 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1315 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001316
1317 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1318
1319 BOOST_ASSERT(layer != nullptr);
1320
jimfly01c25411c2018-11-14 17:47:22 +00001321 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1322 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001323
1324 // register the input connection slots for the layer, connections are made after all layers have been created
1325 // only the tensors for the inputs are relevant, exclude the const tensors
1326 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001327 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001328
jimfly01c25411c2018-11-14 17:47:22 +00001329 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001330 // register the output connection slots for the layer, connections are made after all layers have been created
1331 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1332 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1333}
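
// Illustrative padding example (values assumed, not taken from a real model): a 2x2 max pool
// with stride 2 over an NHWC input of [1, 224, 224, 3] and SAME padding needs no extra rows or
// columns, since 224 is an exact multiple of the stride; for an odd extent such as 225,
// CalcPadding derives a total pad of 1 in that dimension and splits it between the front and
// back pads following the TensorFlow convention.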
1334
josh minorba424d22019-11-13 10:55:17 -06001335void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
1336{
1337 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1338
1339 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1340 CHECK_VALID_SIZE(inputs.size(), 3);
1341 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1342 CHECK_VALID_SIZE(outputs.size(), 1);
1343
1344 SliceDescriptor desc;
1345
1346 // set begin tensor info for slice descriptor
1347 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1348 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1349
1350 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1351 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1352
1353 // set size tensor info for slice descriptor
1354 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1355 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1356
1357 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1358 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1359 desc = SliceDescriptor(begin, size);
1360
1361 auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
1362 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
1363
1364 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1365 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1366
1367 // register the input connection slots for the layer, connections are made after all layers have been created
1368 // only the tensors for the inputs are relevant, exclude the const tensors
1369 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1370 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1371
1372 // register the output connection slots for the layer, connections are made after all layers have been created
1373 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1374 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1375}
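
// Example of the constant Slice inputs handled above (shapes assumed): inputs[1] holds the begin
// offsets and inputs[2] the sizes, both read from constant buffers. With begin = {0, 1, 0} and
// size = {1, 2, 3} on a [2, 3, 3] input, the SliceDescriptor selects a [1, 2, 3] region starting
// at index 1 of the second dimension.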
1376
telsoa01c577f2c2018-08-31 09:22:23 +01001377void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1378{
1379 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1380 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1381 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1382
1383 SoftmaxDescriptor desc;
1384 desc.m_Beta = options->beta;
1385
1386 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1387 CHECK_VALID_SIZE(inputs.size(), 1);
1388 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1389 CHECK_VALID_SIZE(outputs.size(), 1);
1390
1391 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1392 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1393
1394 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1395 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1396
1397 // register the input connection slots for the layer, connections are made after all layers have been created
1398 // only the tensors for the inputs are relevant, exclude the const tensors
1399 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1400 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1401
1402 // register the output connection slots for the layer, connections are made after all layers have been created
1403 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1404 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1405}
1406
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001407void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1408{
1409 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1410
1411 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1412 CHECK_VALID_SIZE(inputs.size(), 3);
1413
1414 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1415 CHECK_VALID_SIZE(outputs.size(), 1);
1416
1417 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1418 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1419
1420 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1421 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1422
1423 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1424 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1425
1426 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1427 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1428
1429 size_t step = 2;
1430 std::vector<std::pair<unsigned int, unsigned int>> padList;
1431 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1432 {
1433 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1434 }
1435
1436 armnn::SpaceToBatchNdDescriptor desc;
1437 desc.m_BlockShape = blockShape;
1438 desc.m_PadList = padList;
1439 desc.m_DataLayout = armnn::DataLayout::NHWC;
1440
1441 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1442
1443 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1444 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1445
1446 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1447
1448 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1449 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1450
1451 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1452 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1453}
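
// Example (values assumed): blockShape = {2, 2} and a pad list buffer of {0, 0, 0, 0}, read as
// (before, after) pairs per spatial dimension by the loop above, maps an NHWC input of
// [1, 4, 4, 1] to an output of [4, 2, 2, 1]: the batch grows by the block-shape product while
// the spatial dimensions shrink accordingly.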
1454
telsoa01c577f2c2018-08-31 09:22:23 +01001455armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1456 const armnn::TensorInfo & inputTensorInfo)
1457{
1458 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1459 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1460 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1461
1462 if (inputTensorInfo.GetNumDimensions() > 4)
1463 {
1464 std::stringstream ss;
1465 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1466 << " shape:" << inputTensorInfo.GetShape() << " "
1467 << CHECK_LOCATION().AsString();
1468 throw ParseException(ss.str());
1469 }
1470
1471 if (squeezeDims.empty())
1472 {
1473 squeezeDims.assign(dimensionSequence,
1474 dimensionSequence+inputTensorInfo.GetNumDimensions());
1475 }
1476
1477 std::vector<uint32_t> outputDims;
1478 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1479 {
1480 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1481 auto currentDimension = inputTensorInfo.GetShape()[i];
1482 if (skipSqueeze || currentDimension != 1)
1483 {
1484 outputDims.push_back(currentDimension);
1485 }
1486 }
1487
1488 if (outputDims.size() > 4)
1489 {
1490 std::stringstream ss;
1491 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1492 << " shape:" << inputTensorInfo.GetShape() << " "
1493 << CHECK_LOCATION().AsString();
1494 throw ParseException(ss.str());
1495 }
1496
1497 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1498 outputDims.data());
1499
1500 // we need to preserve the tensor type and the quantization data as well
1501 TensorInfo outTensorInfo = inputTensorInfo;
1502 outTensorInfo.SetShape(outShape);
1503
1504 return outTensorInfo;
1505}
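
// Worked example: squeeze_dims = {0, 2} on an input of shape [1, 3, 1, 5] removes only the
// listed size-1 dimensions, giving [3, 5]; an empty squeeze_dims list makes every dimension a
// candidate, so [1, 3, 1, 5] also collapses to [3, 5], while non-unit dimensions are always kept.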
1506
1507void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1508{
1509 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1510
1511 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1512 CHECK_VALID_SIZE(inputs.size(), 1);
1513
1514 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1515 CHECK_VALID_SIZE(outputs.size(), 1);
1516
1517 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1518 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1519
1520 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1521 armnn::TensorInfo outputTensorInfo =
1522 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1523 inputTensorInfo);
1524
1525 ReshapeDescriptor reshapeDesc;
1526 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1527
1528 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1529 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1530 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1531
1532 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1533 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1534
1535 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1536 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1537}
1538
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001539void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1540{
1541 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1542
1543 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1544 CHECK_VALID_SIZE(inputs.size(), 4);
1545
1546 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1547 CHECK_VALID_SIZE(outputs.size(), 1);
1548
1549 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1550 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1551
1552 StridedSliceDescriptor desc;
1553 desc.m_BeginMask = options->begin_mask;
1554 desc.m_EllipsisMask = options->ellipsis_mask;
1555 desc.m_EndMask = options->end_mask;
1556 desc.m_NewAxisMask = options->new_axis_mask;
1557 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1558 desc.m_DataLayout = armnn::DataLayout::NHWC;
1559
1560 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1561 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1562
1563 std::vector<int> begin(beginTensorInfo.GetNumElements());
1564 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1565
1566 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1567 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1568
1569 std::vector<int> end(endTensorInfo.GetNumElements());
1570 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1571
1572 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1573 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1574
1575 std::vector<int> stride(strideTensorInfo.GetNumElements());
1576 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1577
1578 desc.m_Begin = begin;
1579 desc.m_End = end;
1580 desc.m_Stride = stride;
1581
1582 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1583 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1584
1585 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1586 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1587
1588 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1589 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1590
1591 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1592 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1593}
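
// Example (values assumed): begin = {0, 0}, end = {2, 3}, stride = {1, 1} with all masks zero
// copies the leading [2, 3] block of a 2-D input; a set bit i in begin_mask or end_mask asks the
// backend to ignore begin[i] or end[i] and use the edge of that dimension instead.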
1594
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001595void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1596{
1597 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1598
1599 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1600 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1601
1602 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1603 CHECK_VALID_SIZE(inputs.size(), 2);
1604
1605 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1606 CHECK_VALID_SIZE(outputs.size(), 1);
1607
1608 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1609 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1610
1611 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1612 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1613
1614 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1615 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1616
1617 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1618 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1619 {
1620 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1621 }
1622 else
1623 {
1624 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1625 }
1626
1627 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1628
1629 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1630 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1631}
1632
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001633void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1634{
1635 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1636
1637 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1638 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1639
1640 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1641 CHECK_VALID_SIZE(inputs.size(), 2);
1642
1643 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1644 CHECK_VALID_SIZE(outputs.size(), 1);
1645
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001646 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1647 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1648
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001649 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1650 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1651
1652 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1653 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1654
1655 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001656 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1657 {
1658 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1659 }
1660 else
1661 {
1662 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1663 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001664
1665 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1666
1667 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1668 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1669}
1670
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001671void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1672{
1673 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1674
1675 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1676 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1677
1678 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1679 CHECK_VALID_SIZE(inputs.size(), 2);
1680
1681 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1682 CHECK_VALID_SIZE(outputs.size(), 1);
1683
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001684 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1685 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1686
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001687 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1688 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1689
1690 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1691 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1692
1693 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001694 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1695 {
1696 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1697 }
1698 else
1699 {
1700 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1701 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001702
1703 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1704
1705 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1706 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1707}
1708
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001709void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1710{
1711 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1712
1713 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1714
1715 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1716 CHECK_VALID_SIZE(outputs.size(), 1);
1717
1718 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1719 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1720
1721 armnn::MeanDescriptor desc;
1722 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1723 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1724 desc.m_Axis = axis;
1725
1726 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1727 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1728
    desc.m_KeepDims =
        inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1732
1733 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1734 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1735
1736 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1737
1738 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1739 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1740
1741 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1742 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1743}
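
// Note: keep_dims is inferred from the shapes rather than read from the operator options: if the
// output rank matches the input rank the reduced axes were kept as size-1 dimensions
// (e.g. [1, 2, 2, 3] -> [1, 1, 1, 3] for axis = {1, 2}), otherwise they were dropped
// (e.g. [1, 2, 2, 3] -> [1, 3]).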
1744
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001745void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1746{
1747 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1748
1749 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1750
1751 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1752 CHECK_VALID_SIZE(outputs.size(), 1);
1753
1754 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1755 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1756
1757 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1758 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1759
1760 size_t step = 2;
1761 armnn::PadDescriptor desc;
1762 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1763 {
1764 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1765 }
1766
1767 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1768 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1769
1770 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1771 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1772
1773 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1774 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1775
1776 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1777 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1778}
1779
Sadik Armagan66dedc72019-12-10 16:32:07 +00001780void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
1781{
1782 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1783
1784 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1785 CHECK_VALID_SIZE(inputs.size(), 1);
1786
1787 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1788 CHECK_VALID_SIZE(outputs.size(), 1);
1789
1790 auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
1791
1792 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
1793 BOOST_ASSERT(layer != nullptr);
1794
1795 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1796 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1797
1798 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1799 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1800
1801 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1802 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1803}
Finn Williamsc42c3842019-01-22 14:18:11 +00001804
Sadik Armagan58f39192018-09-17 14:14:39 +01001805void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1806{
Finn Williamsc42c3842019-01-22 14:18:11 +00001807 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001808}
1809
1810void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1811{
Finn Williamsc42c3842019-01-22 14:18:11 +00001812 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1813}
Sadik Armagan58f39192018-09-17 14:14:39 +01001814
Finn Williamsc42c3842019-01-22 14:18:11 +00001815void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1816{
1817 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1818}
1819
Nina Drozd99851762019-04-09 09:37:38 +01001820void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1821{
1822 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1823}
1824
Finn Williamsc42c3842019-01-22 14:18:11 +00001825
1826void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1827{
1828 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001829 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1830 boost::ignore_unused(operatorPtr);
1831
1832 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1833 CHECK_VALID_SIZE(inputs.size(), 1);
1834
1835 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1836 CHECK_VALID_SIZE(outputs.size(), 1);
1837
Finn Williamsc42c3842019-01-22 14:18:11 +00001838 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001839 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001840 activationDesc.m_Function = activationType;
1841
1842 switch (activationType)
1843 {
1844 case ActivationFunction::ReLu:
1845 {
1846 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1847 break;
1848 }
1849 case ActivationFunction::BoundedReLu:
1850 {
1851 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1852 activationDesc.m_A = 6.0f;
1853 activationDesc.m_B = 0.0f;
1854 break;
1855 }
1856 case ActivationFunction::Sigmoid:
1857 {
1858 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1859 break;
1860 }
Nina Drozd99851762019-04-09 09:37:38 +01001861 case ActivationFunction::TanH:
1862 {
1863 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1864 activationDesc.m_A = 1.0f;
1865 activationDesc.m_B = 1.0f;
1866 break;
1867 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001868 default:
1869 {
1870 throw ParseException(
1871 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1872 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1873 }
1874 }
1875
1876 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001877
1878 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1879 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1880
1881 // register the input connection slots for the layer, connections are made after all layers have been created
1882 // only the tensors for the inputs are relevant, exclude the const tensors
1883 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1884 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1885
1886 // register the output connection slots for the layer, connections are made after all layers have been created
1887 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1888 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1889}

armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1891 const std::vector<int32_t> & targetDimsIn)
1892{
1893 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1894 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1895
1896 if (stretchDim != targetDimsIn.end())
1897 {
1898 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1899 {
1900 throw ParseException(
1901 boost::str(
1902 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1903 }
1904
1905 auto targetNumElements =
1906 boost::numeric_cast<unsigned int>(
1907 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1908
1909 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1910 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1911 }
1912
1913 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1914
1915 TensorInfo reshapeInfo = inputTensorInfo;
1916 reshapeInfo.SetShape(outputShape);
1917
1918 return reshapeInfo;
1919}
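
// Worked example: for a 24-element input and new_shape = {2, -1, 4}, the accumulate call starts
// at -1 so the signed product is -1 * 2 * -1 * 4 = 8; the stretched dimension then becomes
// 24 / 8 = 3 and the output shape is [2, 3, 4].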
1920
1921void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1922{
1923 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1924
1925 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001926
1927 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1928 CHECK_VALID_SIZE(outputs.size(), 1);
1929
1930 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1931 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1932
1933 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001934 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1935 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001936 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1937
kevmay0171972a82018-12-17 14:28:03 +00001938 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001939 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1940 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001941 {
1942 std::stringstream ss;
1943 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001944 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001945 << " does not equal output shape "
1946 << actualOutputTensorInfo.GetShape()
1947 << ": "
1948 << CHECK_LOCATION().AsString();
1949 throw ParseException(ss.str());
1950 }
1951
Sadikb94967b2018-09-19 15:30:00 +01001952 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001953 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001954
1955 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1956 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001957 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001958
1959 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1960 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1961
1962 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1963 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1964}
1965
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001966void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1967{
Sadik Armagana3b31f02019-12-05 09:08:53 +00001968 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
1969}
1970
1971void TfLiteParser::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
1972{
1973 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
1974}
1975
1976void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
1977{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001978 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1979
1980 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1981 CHECK_VALID_SIZE(inputs.size(), 2);
1982
1983 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1984 CHECK_VALID_SIZE(outputs.size(), 1);
1985
1986 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1987
1988 // Data for the parsed tensor args (size) must be stored locally.
1989 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1990
1991 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1992 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1993
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001994 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00001995 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001996 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001997 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1998 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001999
Sadik Armagana3b31f02019-12-05 09:08:53 +00002000 auto layerName = str(boost::format("Resize:"));
2001
2002 switch (resizeMethod)
2003 {
2004 case ResizeMethod::Bilinear:
2005 {
2006 layerName += str(boost::format("BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);
2007 break;
2008 }
2009 case ResizeMethod::NearestNeighbor:
2010 {
2011 layerName += str(boost::format("NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
2012 break;
2013 }
2014 default:
2015 {
2016 throw ParseException(
2017 boost::str(boost::format("Unexpected ResizeMethod[%1%] when creating layerName "
2018 " %2% ") %static_cast<int>(resizeMethod)% CHECK_LOCATION().AsString()));
2019 }
2020 }
2021
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002022 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002023
2024 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2025 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2026
2027 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2028 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2029
2030 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2031 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2032}
2033
Sadik Armagan479045b2018-10-01 11:51:37 +01002034void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
2035{
2036 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2037
2038 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2039 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
2040
2041 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2042
2043 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2044 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2045 CHECK_VALID_SIZE(outputs.size(), 1);
2046
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002047 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
2048 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01002049
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002050 const unsigned int concatDimInput = static_cast<unsigned int>(
2051 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01002052
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002053 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2054 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01002055
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002056 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01002057
2058 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2059 {
2060 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
2061
        // This sets up the concatDescriptor view origins
2063 armnnUtils::ProcessConcatInputTensorInfo(
2064 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01002065 }
2066
2067 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01002068 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01002069
2070 BOOST_ASSERT(layer != nullptr);
2071
2072 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2073 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01002074
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002075 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01002076
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002077 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01002078
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002079 // add fused activation layer
2080 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01002081
Sadik Armagan479045b2018-10-01 11:51:37 +01002082 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2083 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2084}
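
// Note on the axis handling above: TfLite allows a negative concatenation axis, so it is
// normalised modulo the input rank, e.g. axis = -1 on rank-4 inputs gives
// concatDimInput = (4 + -1) % 4 = 3, and ProcessConcatInputTensorInfo accumulates each view's
// origin along that axis.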
2085
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002086void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
2087{
2088 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2089
2090 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2091 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2092
2093 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2094
2095 FullyConnectedDescriptor desc;
2096 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002097 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002098
2099 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2100 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2101 CHECK_VALID_SIZE(outputs.size(), 1);
2102
2103 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2104
2105 // Fully Connected Layer accepts two dimensional weights input
2106 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2107 if (weightsDimension != 2)
2108 {
2109 throw ParseException(
2110 boost::str(
2111 boost::format(
2112 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
2113 "Node %2%")
2114 % weightsDimension
2115 % CHECK_LOCATION().AsString()));
2116 }
2117
Matteo Martincigh747ef822018-12-18 09:26:39 +00002118 auto filterTensorAndData = CreateConstTensor(inputs[1],
2119 filterTensorInfo,
2120 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002121 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002122 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
2123
2124 if (inputs.size() == 3)
2125 {
2126 desc.m_BiasEnabled = true;
2127 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002128 auto biasTensorAndData = CreateConstTensor(inputs[2],
2129 biasTensorInfo,
2130 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002131 layer = m_Network->AddFullyConnectedLayer(desc,
2132 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002133 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002134 layerName.c_str());
2135 }
2136 else
2137 {
2138 layer = m_Network->AddFullyConnectedLayer(desc,
2139 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002140 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002141 layerName.c_str());
2142 }
2143 BOOST_ASSERT(layer != nullptr);
2144
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002145 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2146
2147 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2148
2149 if (inputTensorInfo.GetNumDimensions() > 2)
2150 {
2151 // Add reshape to flatten to 2D [batch_size, input_size],
2152 // where "input_size" corresponds to the number of inputs to the layer,
2153 // matching the second dimension of weights,
2154 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2155 std::vector<unsigned int> reshapedDimensions(2);
2156 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2157 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2158
2159 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2160 {
2161 throw ParseException(
2162 boost::str(
2163 boost::format(
2164 "Failed to deduce input tensor shape from filter size %1%")
2165 % reshapedDimensions[1]
2166 % CHECK_LOCATION().AsString()));
2167 }
2168
2169 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2170 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2171
2172 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, reshapeLayerName.c_str());
2176
2177 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2178 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2179
2180 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2181 }
2182 else
2183 {
2184 // register the input connection slot for the layer
2185 // only the tensors for the inputs are relevant, exclude the const tensors
2186 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2187 }
2188
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002189 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2190 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2191
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002192 // we need to add the activation layer and fortunately we don't need to care about the data layout
2193 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2194 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002195
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002196 // register the output connection slots for the layer, connections are made after all layers have been created
2197 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2198 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2199}
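
// Example of the flattening above (shapes assumed): a [1, 7, 7, 256] input (12544 elements) in
// front of weights of shape [1001, 12544] is reshaped to [batch_size, input_size] = [1, 12544]
// before being connected, since this parser flattens any input with more than two dimensions.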
2200
keidav011b3e2ea2019-02-21 10:07:37 +00002201void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
2202{
2203 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2204
2205 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2206
2207 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2208 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2209 CHECK_VALID_SIZE(outputs.size(), 4);
2210
2211 // Obtain custom options from flexbuffers
2212 auto custom_options = operatorPtr->custom_options;
2213 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
2214
2215 // Obtain descriptor information from tf lite
2216 DetectionPostProcessDescriptor desc;
2217 desc.m_MaxDetections = m["max_detections"].AsUInt32();
2218 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
2219 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
2220 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
2221 desc.m_NumClasses = m["num_classes"].AsUInt32();
2222 desc.m_ScaleH = m["h_scale"].AsFloat();
2223 desc.m_ScaleW = m["w_scale"].AsFloat();
2224 desc.m_ScaleX = m["x_scale"].AsFloat();
2225 desc.m_ScaleY = m["y_scale"].AsFloat();
2226
keidav0107d58c72019-02-26 11:57:39 +00002227 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00002228 {
keidav0107d58c72019-02-26 11:57:39 +00002229 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00002230 }
2231 if (!(m["detections_per_class"].IsNull()))
2232 {
2233 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
2234 }
2235
2236 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
2237 {
2238 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
2239 "must be positive and less than or equal to 1.");
2240 }
2241
2242 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
2243 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
2244 armnn::Optional<armnn::PermutationVector&>());
2245
2246 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
2247 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
2248 layerName.c_str());
2249
2250 BOOST_ASSERT(layer != nullptr);
2251
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002252 // The model does not specify the output shapes.
2253 // The output shapes are calculated from the max_detection and max_classes_per_detection.
2254 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
2255 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2256 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2257 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2258 m_OverridenOutputShapes.push_back({ 1 });
2259
keidav011b3e2ea2019-02-21 10:07:37 +00002260 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
2261 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002262 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00002263 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
2264 }
2265
2266 // Register the input connection slots for the layer, connections are made after all layers have been created
2267 // only the tensors for the inputs are relevant, exclude the const tensors
2268 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2269 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2270
2271 // Register the output connection slots for the layer, connections are made after all layers have been created
2272 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2273 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2274 outputTensorIndexes[1],
2275 outputTensorIndexes[2],
2276 outputTensorIndexes[3]});
2277}
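
// Example of the overridden output shapes (option values assumed): with max_detections = 10 and
// max_classes_per_detection = 1, numDetectedBox = 10, so the four outputs become detection boxes
// [1, 10, 4], classes [1, 10], scores [1, 10] and the number of detections [1].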
2278
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002279/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2280void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2281{
2282 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2283
2284 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2285 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2286 CHECK_VALID_SIZE(outputs.size(), 1);
2287
2288 if (inputs.size() < 1)
2289 {
2290 throw ParseException("Pack must have at least one input.");
2291 }
2292
2293 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2294 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2295
2296 StackDescriptor desc;
2297 desc.m_Axis = static_cast<uint32_t>(options->axis);
2298 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2299
2300 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2301 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2302 desc.m_InputShape = inputTensorInfo.GetShape();
2303
2304 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2305 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2306
2307 BOOST_ASSERT(layer != nullptr);
2308
2309 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2310 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2311
2312 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2313 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2314
2315 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2316 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2317}
2318
Nina Drozd200e3802019-04-15 09:47:39 +01002319void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2320{
2321 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2322
2323 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2324 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2325
    // unpackAxis is the axis along which the input is unpacked
2327 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2328
2329 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2330 CHECK_VALID_SIZE(inputs.size(), 1);
2331
2332 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002333
2334 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2335 {
2336 throw ParseException(
2337 boost::str(
2338 boost::format(
2339 "The unpack axis: %1% cannot be greater than or equal to "
2340 "the number of input dimension %2% %3%")
2341 % unpackAxis
2342 % inputTensorInfo.GetNumDimensions()
2343 % CHECK_LOCATION().AsString()));
2344 }
2345
Nina Drozd200e3802019-04-15 09:47:39 +01002346 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2347 // If num is not defined, automatically infer from the length of the dimension axis.
2348 if(unpackNum == 0)
2349 {
2350 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2351 }
2352
2353 // If unpack number cannot be inferred and is still zero, throw ParseException.
2354 if(unpackNum == 0)
2355 {
2356 throw ParseException("Number to unpack must greater than zero.");
2357 }
2358
2359 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2360 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2361
2362 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2363 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2364
2365 // Add current input shape to unpackDimSizes
2366 for (unsigned int i = 0; i < inputDimSize; ++i)
2367 {
2368 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2369 }
2370
2371 if (unpackDimSizes[unpackAxis] != unpackNum)
2372 {
        throw ParseException("Number to unpack must be the same as the length of the dimension to "
                             "unpack along.");
2375 }
2376
2377 unpackDimSizes[unpackAxis] /= unpackNum;
2378
2379 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2380 for (unsigned int j = 0; j < unpackNum; ++j)
2381 {
2382 // Set the size of the views.
2383 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2384 {
2385 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2386 }
2387 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2388 }
2389
2390 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2391 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2392
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002393 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2394 unpackDimSizes.data());
2395
Nina Drozd200e3802019-04-15 09:47:39 +01002396 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2397 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2398
    // Create a Reshape after each Splitter output to remove the unpacked dimension.
2400 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2401 {
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002402 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002403 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());
2407
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002408 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2409 outputTensorInfo.GetDataType(),
2410 outputTensorInfo.GetQuantizationScale(),
2411 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002412 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2413
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002414 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002415
2416 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2417 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2418 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2419 }
Nina Drozd200e3802019-04-15 09:47:39 +01002420}
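
// Example of the Unpack lowering above (shapes assumed): unpacking a [4, 2, 3] input along
// axis 0 creates a Splitter with four [1, 2, 3] views, each followed by a Reshape that drops the
// unpacked axis so the tensors registered as outputs have shape [2, 3].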
2421
Nina Drozd0324f482019-04-08 10:52:10 +01002422void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2423{
2424 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2425
2426 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2427 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2428
2429 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2430
Nina Drozd200e3802019-04-15 09:47:39 +01002431 // If number of splits cannot be inferred and is zero, throw ParseException.
2432 if(numSplits == 0)
2433 {
2434 throw ParseException("Number to splits must greater than zero.");
2435 }
2436
Nina Drozd0324f482019-04-08 10:52:10 +01002437 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2438 CHECK_VALID_SIZE(inputs.size(), 2);
2439 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2440 CHECK_VALID_SIZE(outputs.size(), numSplits);
2441
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002442 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2443 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002444
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002445 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2446 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2447 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2448
2449 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2450 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002451
    // Armnn supports split only along the channel dimension, i.e. dimension 1 for NCHW or dimension 3 for NHWC.
2453 if (splitDim == 0 || splitDim == 2)
2454 {
2455 throw ParseException(
2456 boost::str(
2457 boost::format(
2458 "Dimension %1% for split is not supported by Armnn. %2%")
2459 % splitDim
2460 % CHECK_LOCATION().AsString()));
2461 }
2462
2463 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002464 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002465 {
2466 throw ParseException(
2467 boost::str(
2468 boost::format(
2469 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002470 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002471 % inputTensorInfo.GetNumDimensions()
2472 % MaxNumOfTensorDimensions
2473 % CHECK_LOCATION().AsString()));
2474 }
2475
2476 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2477
2478 // Add current input shape to splitterDimSizes
2479 for (unsigned int i = 0; i < inputDimSize; ++i)
2480 {
2481 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2482 }
2483
2484 if (splitterDimSizes[splitDim] % numSplits != 0)
2485 {
2486 throw ParseException("Number of splits must evenly divide the dimension");
2487 }
2488 splitterDimSizes[splitDim] /= numSplits;
2489
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002490 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002491 for (unsigned int j = 0; j < numSplits; ++j)
2492 {
2493 // Set the size of the views.
2494 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2495 {
2496 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2497 }
2498 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2499 }
2500
2501 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2502 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2503
2504 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002505 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002506
Nina Drozd0324f482019-04-08 10:52:10 +01002507 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2508 {
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01002509 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
2510 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01002511 }
2512
2513 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2514 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2515}
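
// Worked example (illustrative): a SPLIT with num_splits=2 along axis 1 of a
// [1, 4, 2, 2] input produces a SplitterDescriptor with two views, each of size
// [1, 2, 2, 2]; view j has its origin at coordinate 2*j on dimension 1, with the
// remaining origin coordinates left at their default of 0.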
2516
Sadik Armagan58f39192018-09-17 14:14:39 +01002517armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2518 unsigned int outputSlot,
2519 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002520{
2521 ActivationDescriptor activationDesc;
2522 std::string layerName = prevLayer->GetName();
2523
2524 switch(activationType)
2525 {
2526 case tflite::ActivationFunctionType_NONE:
2527 {
2528 // this is a no-op: return previous layer
2529 return prevLayer;
2530 }
2531 case tflite::ActivationFunctionType_RELU:
2532 {
2533 activationDesc.m_Function = ActivationFunction::ReLu;
2534 layerName += ":RELU";
2535 break;
2536 }
2537 case tflite::ActivationFunctionType_RELU6:
2538 {
2539 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2540 activationDesc.m_A = 6.0f;
2541 activationDesc.m_B = 0.0f;
2542 layerName += ":RELU6";
2543 break;
2544 }
2545 case tflite::ActivationFunctionType_TANH:
2546 {
2547 activationDesc.m_Function = ActivationFunction::TanH;
2548 activationDesc.m_A = 1.0f;
2549 activationDesc.m_B = 1.0f;
2550 layerName += ":TANH";
2551 break;
2552 }
2553
        // These cases are listed here only as a reminder of other fused activations we could support.
2555 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2556 case tflite::ActivationFunctionType_SIGN_BIT:
2557 default:
2558 {
2559 throw ParseException(
2560 boost::str(
                    boost::format("TfLite parser doesn't support fused activation: "
2562 "%1%/%2% %3% ") %
2563 activationType %
2564 tflite::EnumNameActivationFunctionType(activationType) %
2565 CHECK_LOCATION().AsString()));
2566
2567 }
2568 }
2569
2570 IConnectableLayer* activationLayer =
2571 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2572
2573 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2574 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2575 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2576 return activationLayer;
2577}
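
// Illustrative call site (a sketch; 'options' stands for an operator's parsed builtin
// options and is not defined here). Operator parsers append the fused activation to
// their freshly created layer and continue wiring connections from the returned layer:
//     layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);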
2578
2579TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2580{
2581 if (fileName == nullptr)
2582 {
2583 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2584 CHECK_LOCATION().AsString()));
2585 }
2586 boost::system::error_code errorCode;
2587 boost::filesystem::path pathToFile(fileName);
2588 if (!boost::filesystem::exists(pathToFile, errorCode))
2589 {
2590 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2591 fileName %
2592 errorCode %
2593 CHECK_LOCATION().AsString()));
2594 }
2595 std::ifstream file(fileName, std::ios::binary);
2596 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2597 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2598 fileContent.size());
2599}
2600
2601TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2602{
2603 if (binaryContent == nullptr)
2604 {
2605 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2606 CHECK_LOCATION().AsString()));
2607 }
2608 flatbuffers::Verifier verifier(binaryContent, len);
2609 if (verifier.VerifyBuffer<tflite::Model>() == false)
2610 {
2611 throw ParseException(
            boost::str(boost::format("Buffer doesn't conform to the expected TensorFlow Lite "
2613 "flatbuffers format. size:%1% %2%") %
2614 len %
2615 CHECK_LOCATION().AsString()));
2616 }
2617 return tflite::UnPackModel(binaryContent);
2618}
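
// example usage (the file name is a placeholder):
//     ModelPtr model = LoadModelFromFile("model.tflite");
//     size_t subgraphCount = model->subgraphs.size();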
2619
2620TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2621 size_t subgraphIndex,
2622 size_t operatorIndex)
2623{
2624 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2625
Derek Lambertiff05cc52019-04-26 13:05:17 +01002626 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2627 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002628
2629 size_t inputCount = operatorPtr->inputs.size();
2630 TensorRawPtrVector result(inputCount);
2631 for (size_t i=0; i<inputCount; ++i)
2632 {
2633 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002634 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002635 }
2636 return result;
2637}
2638
2639TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2640 size_t subgraphIndex,
2641 size_t operatorIndex)
2642{
2643 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2644
Derek Lambertiff05cc52019-04-26 13:05:17 +01002645 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2646 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002647
2648 size_t outputCount = operatorPtr->outputs.size();
2649 TensorRawPtrVector result(outputCount);
2650 for (size_t i=0; i<outputCount; ++i)
2651 {
2652 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2653 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002654 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002655 }
2656 return result;
2657}
2658
2659TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2660 size_t subgraphIndex)
2661{
2662 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002663 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002664
Derek Lambertiff05cc52019-04-26 13:05:17 +01002665 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002666 TensorIdRawPtrVector result(inputCount);
2667 for (size_t i=0; i<inputCount; ++i)
2668 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002669 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002670 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002671 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002672 }
2673 return result;
2674}
2675
2676TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2677 size_t subgraphIndex)
2678{
2679 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002680 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002681
Derek Lambertiff05cc52019-04-26 13:05:17 +01002682 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002683 TensorIdRawPtrVector result(outputCount);
2684 for (size_t i=0; i<outputCount; ++i)
2685 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002686 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2687 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002688 }
2689 return result;
2690}
2691
2692std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2693 size_t subgraphIndex,
2694 size_t operatorIndex)
2695{
2696 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002697 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2698 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002699 return operatorPtr->inputs;
2700}
2701
2702std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2703 size_t subgraphIndex,
2704 size_t operatorIndex)
2705{
2706 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002707 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2708 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002709 return operatorPtr->outputs;
2710}
2711
2712void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2713 size_t operatorIndex,
2714 IConnectableLayer* layer,
2715 const std::vector<unsigned int>& tensorIndexes)
2716{
2717 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2718 BOOST_ASSERT(layer != nullptr);
2719 if (tensorIndexes.size() != layer->GetNumInputSlots())
2720 {
2721 throw ParseException(
2722 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2723 " for subgraph:%3% operator index:%4% %5%") %
2724 tensorIndexes.size() %
2725 layer->GetNumInputSlots() %
2726 subgraphIndex %
2727 operatorIndex %
2728 CHECK_LOCATION().AsString()));
2729 }
2730
2731 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2732 {
2733 unsigned int tensorIndex = tensorIndexes[slotIndex];
2734 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2735 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2736 }
2737}
2738
2739void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2740 size_t operatorIndex,
2741 IConnectableLayer* layer,
2742 const std::vector<unsigned int>& tensorIndexes)
2743{
2744 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2745 BOOST_ASSERT(layer != nullptr);
2746 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2747 {
2748 throw ParseException(
2749 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2750 " for subgraph:%3% operator index:%4% %5%") %
2751 tensorIndexes.size() %
2752 layer->GetNumOutputSlots() %
2753 subgraphIndex %
2754 operatorIndex %
2755 CHECK_LOCATION().AsString()));
2756 }
2757
2758 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2759 {
2760 unsigned int tensorIndex = tensorIndexes[slotIndex];
2761 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2762 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2763 }
2764}
2765
2766void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2767{
2768 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2769
2770 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2771 for (auto const & tensorIdAndPtr : inputs)
2772 {
2773 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2774 IConnectableLayer* layer =
2775 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2776
2777 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2778 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2779
2780 RegisterOutputSlots(subgraphIndex,
2781 VIRTUAL_OPERATOR_ID,
2782 layer,
2783 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2784 }
2785}
2786
2787void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2788{
2789 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2790
2791 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2792 for (auto const & tensorIdAndPtr : outputs)
2793 {
2794 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2795 IConnectableLayer* layer =
2796 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2797
2798 RegisterInputSlots(subgraphIndex,
2799 VIRTUAL_OPERATOR_ID,
2800 layer,
2801 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2802 }
2803}
2804
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002805void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2806{
2807 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2808
Derek Lambertiff05cc52019-04-26 13:05:17 +01002809 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002810 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2811 {
2812 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2813 {
2814 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2815 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2816 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002817 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002818 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2819 auto tensorAndData = CreateConstTensor(tensorPtr,
2820 tensorInfo,
2821 armnn::Optional<armnn::PermutationVector&>());
2822
2823 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2824 IConnectableLayer *layer =
2825 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2826
2827 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2828 RegisterOutputSlots(subgraphIndex,
2829 VIRTUAL_OPERATOR_ID,
2830 layer,
2831 { tensorIndex });
2832
2833 }
2834 }
2835 }
2836}
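
// Note on the loop above: a tensor that has registered consumers but no producer at
// this point is treated as a constant stored in the flatbuffer, which is why it is
// materialised as a ConstantLayer and registered as the producer of that tensor.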
2837
telsoa01c577f2c2018-08-31 09:22:23 +01002838// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2839TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2840{
2841 CHECK_BUFFER(model, bufferIndex);
2842 return model->buffers[bufferIndex].get();
2843}
2844
Matteo Martincigh747ef822018-12-18 09:26:39 +00002845template<typename T>
2846std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2847TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2848 TfLiteParser::TensorRawPtr tensorPtr,
2849 armnn::TensorInfo& tensorInfo,
2850 armnn::Optional<armnn::PermutationVector&> permutationVector)
2851{
2852 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2853 tensorPtr,
2854 tensorInfo,
2855 permutationVector);
2856 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2857 return std::make_pair(constData.first, std::move(storage));
2858}
2859
telsoa01c577f2c2018-08-31 09:22:23 +01002860std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2861TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002862 armnn::TensorInfo& tensorInfo,
2863 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002864{
2865 CHECK_TENSOR_PTR(tensorPtr);
2866 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2867 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2868
2869 switch (tensorInfo.GetDataType())
2870 {
2871 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002872 return CreateConstTensorAndStoreData<float>(bufferPtr,
2873 tensorPtr,
2874 tensorInfo,
2875 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00002876 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002877 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2878 tensorPtr,
2879 tensorInfo,
2880 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00002881 case armnn::DataType::QSymmS8:
2882 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2883 tensorPtr,
2884 tensorInfo,
2885 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002886 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002887 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2888 tensorPtr,
2889 tensorInfo,
2890 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002891 default:
2892 {
2893 std::stringstream errString;
2894 errString << "Unexpected datatype when creating const tensor: "
2895 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2896 << " shape:" << tensorInfo.GetShape()
2897 << CHECK_LOCATION().AsString();
2898 throw ParseException(errString.str());
2899 }
2900 }
2901}
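
// example usage (a sketch; 'weightsTensorPtr' and 'weightsInfo' are hypothetical locals):
//     auto weightsAndData = CreateConstTensor(weightsTensorPtr, weightsInfo,
//                                             armnn::Optional<armnn::PermutationVector&>());
//     // weightsAndData.first is the armnn::ConstTensor handed to the layer, while
//     // weightsAndData.second owns the backing storage and must outlive its use.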
2902
2903BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2904 const std::string& name) const
2905{
2906 CHECK_SUBGRAPH(m_Model, subgraphId);
2907 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2908 for (auto const & input : inputs)
2909 {
2910 if (input.second->name == name)
2911 {
2912 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2913 return std::make_pair(bindingId, ToTensorInfo(input.second));
2914 }
2915 }
2916
2917 std::stringstream bindings;
2918 for (auto const & input : inputs)
2919 {
2920 bindings << "'" << input.second->name << "' ";
2921 }
2922
2923 throw ParseException(
2924 boost::str(
2925 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2926 "Possible inputs are: [%3%] %4%") %
2927 subgraphId %
2928 name %
2929 bindings.str() %
2930 CHECK_LOCATION().AsString()));
2931}
2932
2933BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2934 const std::string& name) const
2935{
2936 CHECK_SUBGRAPH(m_Model, subgraphId);
2937 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002938 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002939 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002940 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002941 if (output.second->name == name)
2942 {
2943 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002944 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2945 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2946 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002947 }
2948 }
2949
2950 std::stringstream bindings;
2951 for (auto const & output : outputs)
2952 {
2953 bindings << "'" << output.second->name << "' ";
2954 }
2955
2956 throw ParseException(
2957 boost::str(
2958 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2959 "Possible outputs are: [%3%] %4%") %
2960 subgraphId %
2961 name %
2962 bindings.str() %
2963 CHECK_LOCATION().AsString()));
2964}
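
// example usage (a sketch; subgraph 0 and the tensor names are placeholders):
//     BindingPointInfo inBinding  = parser.GetNetworkInputBindingInfo(0, "input");
//     BindingPointInfo outBinding = parser.GetNetworkOutputBindingInfo(0, "output");
//     // each pair holds the layer binding id and the armnn::TensorInfo for that tensor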
2965
2966size_t TfLiteParser::GetSubgraphCount() const
2967{
2968 return m_Model->subgraphs.size();
2969}
2970
2971std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2972{
2973 CHECK_SUBGRAPH(m_Model, subgraphId);
2974 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2975 std::vector<std::string> result;
2976 result.reserve(inputs.size());
2977 for (auto const & input : inputs)
2978 {
2979 result.push_back(input.second->name);
2980 }
2981 return result;
2982}
2983
2984std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2985{
2986 CHECK_SUBGRAPH(m_Model, subgraphId);
2987 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2988 std::vector<std::string> result;
2989 result.reserve(outputs.size());
2990 for (auto const & output : outputs)
2991 {
2992 result.push_back(output.second->name);
2993 }
2994 return result;
2995}
2996
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01002997ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01002998{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01002999 return new TfLiteParser(options);
telsoa01c577f2c2018-08-31 09:22:23 +01003000}
3001
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003002ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003003{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003004 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
telsoa01c577f2c2018-08-31 09:22:23 +01003005}
3006
3007void ITfLiteParser::Destroy(ITfLiteParser* parser)
3008{
3009 delete parser;
3010}
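
// Minimal end-to-end sketch of the public API (the model path is a placeholder and
// CreateNetworkFromBinaryFile is assumed to be declared in ITfLiteParser.hpp):
//     ITfLiteParserPtr parser = ITfLiteParser::Create();
//     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//     std::string inputName = parser->GetSubgraphInputTensorNames(0)[0];
//     BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, inputName);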
3011
3012TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
3013: m_FloatData(std::move(data))
3014, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003015, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003016, m_Int32Data(nullptr)
3017{
3018}
3019
3020TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
3021: m_FloatData(nullptr)
3022, m_Uint8Data(std::move(data))
Keith Davisd305e1a2020-01-22 11:57:54 +00003023, m_Int8Data(nullptr)
3024, m_Int32Data(nullptr)
3025{
3026}
3027
3028TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
3029: m_FloatData(nullptr)
3030, m_Uint8Data(nullptr)
3031, m_Int8Data(std::move(data))
telsoa01c577f2c2018-08-31 09:22:23 +01003032, m_Int32Data(nullptr)
3033{
3034}
3035
3036TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
3037: m_FloatData(nullptr)
3038, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003039, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003040, m_Int32Data(std::move(data))
3041{
3042}
3043
3044} // armnnTfLiteParser