blob: c695caa28027b340ac95012948b074133fdc1ffb [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Bentham39ef3e52020-01-20 10:09:09 +00008#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +01009#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000010#include <armnn/Logging.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010011#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010012#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000013#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010014#include <armnn/utility/NumericCast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010015
16// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000017#include <armnnUtils/Permute.hpp>
18
Sadik Armagan479045b2018-10-01 11:51:37 +010019#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010020#include <VerificationHelpers.hpp>
21
22// The generated code based on the Tf Lite schema:
23#include <schema_generated.h>
24
Matteo Martincighe011d202019-11-28 11:35:47 +000025#include <flatbuffers/flexbuffers.h>
26
telsoa01c577f2c2018-08-31 09:22:23 +010027#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010028#include <boost/numeric/conversion/cast.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000029#include <boost/filesystem.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010030
31#include <fstream>
32#include <algorithm>
33#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010034#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000035#include <sstream>
36
37#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
38 { \
39 throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
40 << ": " \
41 << CHECK_LOCATION().AsString()).str()); \
42 }
telsoa01c577f2c2018-08-31 09:22:23 +010043
44using namespace armnn;
45using armnn::CheckLocation;
46namespace armnnTfLiteParser
47{
48namespace
49{
jimfly01c25411c2018-11-14 17:47:22 +000050
telsoa01c577f2c2018-08-31 09:22:23 +010051const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
52
53void CheckSubgraph(const TfLiteParser::ModelPtr & model,
54 size_t subgraphIndex,
55 const CheckLocation & location)
56{
57 if (model.get() == nullptr)
58 {
59 throw ParseException(
60 boost::str(
61 boost::format("%1% was called with invalid (null) model. "
62 "Possible reason is that the model is not yet loaded and Unpack(ed). "
63 "subgraph:%2% at %3%") %
64 location.m_Function %
65 subgraphIndex %
66 location.FileLine()));
67 }
68 else if (subgraphIndex >= model->subgraphs.size())
69 {
70 throw ParseException(
71 boost::str(
72 boost::format("%1% was called with an invalid subgraph index. "
73 "subgraph:%2% at %3%") %
74 location.m_Function %
75 subgraphIndex %
76 location.FileLine()));
77 }
78}
79
80#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
81 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
82
83void CheckModel(const TfLiteParser::ModelPtr & model,
84 size_t subgraphIndex,
85 size_t operatorIndex,
86 const CheckLocation & location)
87{
88 if (model.get() == nullptr)
89 {
90 throw ParseException(
91 boost::str(
92 boost::format("%1% was called with invalid (null) model. "
93 "Possible reason is that the model is not yet loaded and Unpack(ed). "
94 "subgraph:%2% operator:%3% at %4%") %
95 location.m_Function %
96 subgraphIndex %
97 operatorIndex %
98 location.FileLine()));
99 }
100 else if (subgraphIndex >= model->subgraphs.size())
101 {
102 throw ParseException(
103 boost::str(
104 boost::format("%1% was called with an invalid subgraph index. "
105 "subgraph:%2% operator:%3% at %4%") %
106 location.m_Function %
107 subgraphIndex %
108 operatorIndex %
109 location.FileLine()));
110 }
111 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
112 operatorIndex != VIRTUAL_OPERATOR_ID)
113 {
114 throw ParseException(
115 boost::str(
116 boost::format("%1% was called with an invalid operator index. "
117 "subgraph:%2% operator:%3% at %4%") %
118 location.m_Function %
119 subgraphIndex %
120 operatorIndex %
121 location.FileLine()));
122 }
123}
124
125#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
126 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
127
128void CheckTensor(const TfLiteParser::ModelPtr & model,
129 size_t subgraphIndex,
130 size_t tensorIndex,
131 const CheckLocation & location)
132{
133 // not checking model, because I assume CHECK_MODEL already run
134 // and checked that. An assert would do.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100135 ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
telsoa01c577f2c2018-08-31 09:22:23 +0100136
137 // also subgraph index should be checked by CHECK_MODEL so
138 // I only add an assert here
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100139 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
telsoa01c577f2c2018-08-31 09:22:23 +0100140
141 // the tensor index is the only one to check here
142 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
143 {
144 throw ParseException(
145 boost::str(
146 boost::format("%1% was called with an invalid tensor index. "
147 "subgraph:%2% tensor:%3% at %4%") %
148 location.m_Function %
149 subgraphIndex %
150 tensorIndex %
151 location.FileLine()));
152 }
153}
154
155#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
156 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
157
158void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
159 const CheckLocation & location)
160{
161 if (rawPtr == nullptr)
162 {
163 throw ParseException(
164 boost::str(
165 boost::format("%1% was called with a null tensor pointer. "
166 "at %2%") %
167 location.m_Function %
168 location.FileLine()));
169
170 }
171}
172
173#define CHECK_TENSOR_PTR(TENSOR_PTR) \
174 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
175
176void CheckBuffer(const TfLiteParser::ModelPtr & model,
177 size_t bufferIndex,
178 const CheckLocation & location)
179{
180 if (model.get() == nullptr)
181 {
182 throw ParseException(
183 boost::str(
184 boost::format("%1% was called with invalid (null) model. "
185 "Possible reason is that the model is not yet loaded and Unpack(ed). "
186 "buffer:%2% at %3%") %
187 location.m_Function %
188 bufferIndex %
189 location.FileLine()));
190 }
191 else if (bufferIndex >= model->buffers.size())
192 {
193 throw ParseException(
194 boost::str(
195 boost::format("%1% was called with an invalid buffer index. "
196 "buffer index:%2% at %3%") %
197 location.m_Function %
198 bufferIndex %
199 location.FileLine()));
200 }
201 else if (model->buffers[bufferIndex].get() == nullptr)
202 {
203 throw ParseException(
204 boost::str(
205 boost::format("The buffer #%1% is null. %3%") %
206 bufferIndex %
207 location.AsString()));
208 }
209}
210
211#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
212 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
213
214void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
215 const armnn::TensorInfo & tensorInfo,
216 uint32_t bufferId,
217 const CheckLocation & location)
218{
219 if (bufferPtr == nullptr)
220 {
221 throw ParseException(
222 boost::str(
223 boost::format("BufferPtr is null for buffer:%1%. %2%") %
224 bufferId %
225 location.AsString()));
226 }
227 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
228 tensorInfo.GetNumBytes() > bufferPtr->data.size())
229 {
230 std::stringstream ss;
231 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
232 << "For tensor: " << tensorInfo.GetShape()
233 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
234 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
235 throw ParseException(ss.str());
236 }
237}
238
239#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
240 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
241
242bool IsActivationSupported(tflite::ActivationFunctionType activationType)
243{
244 switch(activationType)
245 {
246 case tflite::ActivationFunctionType_NONE:
247 case tflite::ActivationFunctionType_RELU:
248 case tflite::ActivationFunctionType_RELU6:
249 case tflite::ActivationFunctionType_TANH:
250 {
251 return true;
252 }
253 default:
254 {
255 return false;
256 }
257 }
258}
259
// Throws a ParseException when the operator options carry a fused activation
// function that IsActivationSupported() rejects.
// BUGFIX: corrected the "suppport" typo in the user-facing error message.
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
277
278
279std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
280{
281 std::vector<unsigned int> result;
282 result.reserve(in.size());
283 for (auto & i : in)
284 {
285 result.push_back(CHECKED_NON_NEGATIVE(i));
286 }
287 return result;
288}
289
290void CalcPadding(uint32_t inputSize,
291 uint32_t filterSize,
292 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100293 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100294 uint32_t& paddingFront,
295 uint32_t& paddingBack,
296 tflite::Padding padding)
297{
298 paddingFront = 0;
299 paddingBack = 0;
300 if (padding == tflite::Padding_SAME)
301 {
302 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100303 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
304 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100305 if (temp > inputSize)
306 {
307 paddingFront = (temp - inputSize) / 2;
308 paddingBack = (temp - inputSize) - paddingFront;
309 }
310 }
311}
312
Keith Davis0c2eeac2020-02-11 16:51:50 +0000313armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes,
314 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
telsoa01c577f2c2018-08-31 09:22:23 +0100315{
316 armnn::DataType type;
317 CHECK_TENSOR_PTR(tensorPtr);
318
319 switch (tensorPtr->type)
320 {
321 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000322 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100323 break;
324 case tflite::TensorType_FLOAT32:
325 type = armnn::DataType::Float32;
326 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000327 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000328 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000329 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000330 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000331 type = armnn::DataType::QAsymmS8;
332 }
333 else
334 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000335 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000336 type = armnn::DataType::QSymmS8;
337 }
Finn Williamsed66d142019-12-06 09:55:55 +0000338 break;
339 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000340 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000341 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100342 case tflite::TensorType_INT32:
343 type = armnn::DataType::Signed32;
344 break;
345
346 default:
347 {
348 CheckLocation location = CHECK_LOCATION();
349 throw ParseException(
350 boost::str(
351 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
352 tensorPtr->type %
353 tflite::EnumNameTensorType(tensorPtr->type) %
354 tensorPtr->name %
355 location.AsString()));
356 }
357 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100358 std::vector<unsigned int> safeShape = shapes;
359 if (safeShape.size() == 0)
360 {
361 safeShape.push_back(1);
362 }
363
Keith Davisd305e1a2020-01-22 11:57:54 +0000364 float quantizationScale = 0.0f;
365 int32_t quantizationOffset = 0;
366
367 if (tensorPtr->quantization.get())
368 {
369 if (tensorPtr->quantization->scale.size() <= 1)
370 {
371 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
372 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
373
374 if (tensorPtr->quantization->scale.size() == 1)
375 {
376 quantizationScale = tensorPtr->quantization->scale[0];
377 }
378 if (tensorPtr->quantization->zero_point.size() == 1)
379 {
380 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000381 // but this is what we support at the moment in ArmNN
Keith Davisd305e1a2020-01-22 11:57:54 +0000382 quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
383 }
384
385 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
386 safeShape.data(),
387 type,
388 quantizationScale,
389 quantizationOffset);
390
391 return result;
392 }
393 else
394 {
395 std::vector<float> quantizationScales;
396 std::vector<int32_t> quantizationOffsets;
397
398 // Scale
399 std::copy(tensorPtr->quantization->scale.begin(),
400 tensorPtr->quantization->scale.end(),
401 std::back_inserter(quantizationScales));
402
Keith Davis0c2eeac2020-02-11 16:51:50 +0000403 // QSymmS8 Per-axis
Keith Davisd305e1a2020-01-22 11:57:54 +0000404 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
405 safeShape.data(),
406 type,
407 quantizationScales,
Keith Davis0c2eeac2020-02-11 16:51:50 +0000408 dimensionMappings[boost::numeric_cast<unsigned int>(
409 tensorPtr->quantization->quantized_dimension)]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000410 return result;
411 }
412 }
413 else
414 {
415 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
416 safeShape.data(),
417 type,
418 quantizationScale,
419 quantizationOffset);
420 return result;
421 }
telsoa01c577f2c2018-08-31 09:22:23 +0100422}
423
Keith Davis0c2eeac2020-02-11 16:51:50 +0000424armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
425 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000426{
427 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Keith Davis0c2eeac2020-02-11 16:51:50 +0000428 return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000429}
430
telsoa01c577f2c2018-08-31 09:22:23 +0100431template<typename T>
432std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
433CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
434 TfLiteParser::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000435 armnn::TensorInfo& tensorInfo,
436 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100437{
Jan Eilers8eb25602020-03-09 12:13:48 +0000438 IgnoreUnused(tensorPtr);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100439 ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
440 ARMNN_ASSERT_MSG(bufferPtr != nullptr,
telsoa01c577f2c2018-08-31 09:22:23 +0100441 boost::str(
442 boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
443
444 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000445
446 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
447 {
448 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000449 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
450 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000451 }
452 else
453 {
454 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
455 }
456
telsoa01c577f2c2018-08-31 09:22:23 +0100457 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
458}
459
telsoa01c577f2c2018-08-31 09:22:23 +0100460armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
461{
462 // generate the binding id by shifting the tensor id by 8 bit
463 // and add the subgraph id, which allows 256 subgraphs
464 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
465}
466
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000467bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
468{
469 const unsigned int actualSize = actual.GetNumDimensions();
470 if (actualSize != expected.size())
471 {
472 return false;
473 }
474
475 for (unsigned int i = 0u; i < actualSize; i++)
476 {
477 if (expected[i] < 0 ||
478 actual[i] != static_cast<unsigned int>(expected[i]))
479 {
480 return false;
481 }
482 }
483
484 return true;
485}
486
telsoa01c577f2c2018-08-31 09:22:23 +0100487} // <anonymous>
488
// Constructor: builds the dispatch table from tflite builtin operator codes to
// member parse functions. Every slot defaults to ParseUnsupportedOperator, so
// unregistered opcodes are handled (throw or stand-in) rather than crashing.
TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParser::ParseConv2D;
    // CUSTOM dispatches again through m_CustomParserFunctions (see below).
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParser::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParser::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_EXP]                     = &TfLiteParser::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParser::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParser::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParser::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParser::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V]                 = &TfLiteParser::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParser::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParser::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParser::ParseUnpack;
    // NOTE(review): DIV is appended out of alphabetical order — consider moving
    // it up with the other D entries for readability.
    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParser::ParseDiv;
    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"]      = &TfLiteParser::ParseDetectionPostProcess;
}
538
539void TfLiteParser::ResetParser()
540{
541 m_Network = armnn::INetworkPtr(nullptr, nullptr);
542 m_Model = nullptr;
543 m_SubgraphConnections.clear();
544}
545
// Inserts a Reshape layer in front of the lower-rank input of a two-input
// operator so both inputs end up with the same number of dimensions (1s are
// prepended), which is what the backends need for broadcasting.
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    ARMNN_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    // Needs at least two inputs to have anything to broadcast.
    ARMNN_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    // Ensure "reshaped*" refers to the LOWER-rank input; swap the ids and
    // re-fetch the infos if input[1] has fewer dimensions than input[0].
    // NOTE(review): after a swap the operands feed the opposite input slots of
    // `layer` (reshaped one always goes to slot 0) — fine for commutative ops;
    // confirm for non-commutative ones such as SUB/DIV.
    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    // Right-align the existing dimensions and pad the front with 1s,
    // e.g. [3,4] broadcast against a 4D tensor becomes [1,1,3,4].
    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    // The un-reshaped input connects directly to slot 1 of the target layer.
    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
602
telsoa01c577f2c2018-08-31 09:22:23 +0100603INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
604{
605 ResetParser();
606 m_Model = LoadModelFromFile(graphFile);
607 return CreateNetworkFromModel();
608}
609
610INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
611{
612 ResetParser();
613 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
614 return CreateNetworkFromModel();
615}
616
// Walks the loaded flatbuffer model, dispatches each operator to its parse
// function, sets up input/output/constant layers, then wires all recorded
// producer->consumer connections. Parse failures are accumulated per operator
// and reported together in a single ParseException at the end.
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    ARMNN_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    // Only single-subgraph models are supported at the moment.
    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubgraphPtr const & subgraph : m_Model->subgraphs)
    {
        // One connection-tracking entry per tensor in this subgraph.
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                // Guard the dispatch table lookup against corrupt opcodes.
                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                            builtinCode %
                            tflite::BuiltinOperator_MAX %
                            subgraphIndex %
                            operatorIndex %
                            CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                // Record the failure and keep parsing so all problems are
                // reported in one go rather than one at a time.
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                ARMNN_LOG(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    // (this loop-local subgraphIndex intentionally shadows the counter above)
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // Tensors with no registered producer (e.g. graph inputs handled
            // elsewhere) are skipped.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                    inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                    ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    // Hand ownership of the built network to the caller.
    return std::move(m_Network);
}
712
713void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
714 size_t tensorIndex,
715 armnn::IOutputSlot* slot)
716{
717 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100718 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
719 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100720
721 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
722
723 // assuming there is only one producer for that tensor
724 if (tensorSlots.outputSlot != nullptr)
725 {
726 throw ParseException(boost::str(
727 boost::format("Another layer has already registered itself as the producer of "
728 "subgraph:%1% tensor:%2% %3%") %
729 subgraphIndex %
730 tensorIndex %
731 CHECK_LOCATION().AsString()));
732 }
733
734 tensorSlots.outputSlot = slot;
735}
736
737void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
738 size_t tensorIndex,
739 armnn::IInputSlot* slot)
740{
741 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100742 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
743 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100744
745 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
746 tensorSlots.inputSlots.push_back(slot);
747}
748
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100749void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
750{
751 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
752
753 // NOTE: By default we presume the custom operator is not supported
754 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
755
756 // Identify custom code defined for custom operator
757 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
758 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
759
760 // Find parser function that correspondes to custom code (if any)
761 auto iterator = m_CustomParserFunctions.find(customCode);
762 if (iterator != m_CustomParserFunctions.end())
763 {
764 customParserFunction = iterator->second;
765 }
766
767 // Run parser function
768 (this->*customParserFunction)(subgraphIndex, operatorIndex);
769}
770
/// Handles an operator ArmNN cannot parse. Either throws a ParseException
/// (the default), or — when the m_StandInLayerForUnsupported option is set —
/// inserts a non-executable StandInLayer so the rest of the graph can still
/// be constructed.
void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
            boost::str(
                boost::format("Operator not supported. "
                              "subgraph:%1% operator:%2% "
                              "opcode_index:%3% opcode:%4% / %5% %6%") %
                              subgraphIndex %
                              operatorIndex %
                              opcodeIndex %
                              opcode %
                              tflite::EnumNameBuiltinOperator(opcode) %
                              CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());

    // The StandIn layer only records arity; it carries no computation.
    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
    }

    // Register every input/output tensor; unlike the typed parsers, nothing
    // here is treated as a constant, so all ids are passed through.
    auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}
818
/// Parses a CONV_2D operator into an ArmNN Convolution2dLayer.
/// Inputs: [0] activation tensor (NHWC), [1] constant weights, [2] optional
/// constant bias. A fused activation, if present, is appended as a separate
/// activation layer.
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;  // flipped below if a bias tensor is present
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Derive explicit padding from the TfLite SAME/VALID padding scheme.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // Weights are constant data pulled out of the flatbuffer (no permutation).
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // NOTE: AddFusedActivationLayer may return a new trailing layer; output
    // slots must be registered on that layer, so this must stay after the
    // input-slot registration above.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
901
/// Parses a DEPTHWISE_CONV_2D operator into an ArmNN
/// DepthwiseConvolution2dLayer, converting the TfLite [1, H, W, I*M] weight
/// layout to the [M, I, H, W] layout ArmNN expects.
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;  // flipped below if a bias tensor is present
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // depth_multiplier itself is not stored in the descriptor; it is implied
    // by the reshaped weight tensor below. Only validate it here.
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1], permutationVector);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // The permutation is applied while copying the constant weight data.
    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Must follow input-slot registration: a fused activation replaces the
    // layer whose outputs get registered below.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
988
Finn Williamsed66d142019-12-06 09:55:55 +0000989void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
990{
991 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
992
993 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
994 CHECK_VALID_SIZE(inputs.size(), 1);
995
996 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
997 CHECK_VALID_SIZE(outputs.size(), 1);
998
999 auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
1000
1001 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001002 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +00001003
1004 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1005 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1006
1007 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1008 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1009
1010 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1011 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1012}
1013
Derek Lambertif0176992020-04-28 13:37:49 +01001014void TfLiteParser::ParseExp(size_t subgraphIndex, size_t operatorIndex)
1015{
1016 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1017
1018 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1019 CHECK_VALID_SIZE(inputs.size(), 1);
1020
1021 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1022 CHECK_VALID_SIZE(outputs.size(), 1);
1023
1024 auto layerName = boost::str(boost::format("Exp:%1%:%2%") % subgraphIndex % operatorIndex);
1025
1026 ElementwiseUnaryDescriptor desc;
1027 desc.m_Operation = UnaryOperation::Exp;
1028 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerName.c_str());
1029 ARMNN_ASSERT(layer != nullptr);
1030
1031 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1032 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1033
1034 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1035 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1036
1037 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1038 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1039}
1040
/// Parses a TRANSPOSE operator. The permutation may be supplied as a second
/// (constant) input tensor; when absent, a default-constructed
/// TransposeDescriptor is used.
void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1, 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);

    TransposeDescriptor desc;

    if (inputs.size() == 2)
    {
        // Read the permutation vector out of the constant buffer.
        // NOTE(review): the memcpy assumes the buffer's element size matches
        // unsigned int (i.e. 4-byte permutation entries) — confirm against
        // the tensor's declared type.
        armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
        BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
        auto numPermVecElements = permuteTensorInfo.GetNumElements();
        std::vector<unsigned int> permuteShape(numPermVecElements);
        ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
        PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());

        desc = TransposeDescriptor(permutationVector);
    }

    layer = m_Network->AddTransposeLayer(desc, layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor is registered; the permutation input is constant.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1081
/// Parses a TRANSPOSE_CONV operator. TfLite input layout for this op is
/// unusual: inputs[1] holds the constant weights and inputs[2] is the actual
/// data tensor; inputs[0] (the output-shape tensor) is not read here.
/// No bias is supported (m_BiasEnabled stays false).
void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // inputs[2] is the data tensor (see function comment above).
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Derive explicit padding; dilation is fixed at 1 for transpose conv.
    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);

    layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                      filterTensorAndData.first,
                                                      EmptyOptional(),
                                                      layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1151
/// AVERAGE_POOL_2D is handled by the shared pooling parser.
void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1156
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001157void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
1158{
1159 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1160
1161 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1162 CHECK_VALID_SIZE(inputs.size(), 3);
1163
1164 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1165 CHECK_VALID_SIZE(outputs.size(), 1);
1166
1167 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1168 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1169
1170 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1171 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1172
1173 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1174 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1175
1176 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1177 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1178
1179 size_t step = 2;
1180 std::vector<std::pair<unsigned int, unsigned int>> crops;
1181 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1182 {
1183 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1184 }
1185
1186 armnn::BatchToSpaceNdDescriptor desc;
1187 desc.m_BlockShape = blockShape;
1188 desc.m_Crops = crops;
1189 desc.m_DataLayout = armnn::DataLayout::NHWC;
1190
1191 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1192
1193 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
1194 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1195
1196 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1197
1198 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1199 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1200
1201 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1202 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1203}
1204
Matthew Jackson28c94572019-07-18 10:47:03 +01001205void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1206{
1207 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1208
1209 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1210 CHECK_VALID_SIZE(inputs.size(), 1);
1211
1212 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1213 CHECK_VALID_SIZE(outputs.size(), 1);
1214
1215 L2NormalizationDescriptor desc;
1216 desc.m_DataLayout = armnn::DataLayout::NHWC;
1217 auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1218 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1219
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001220 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001221
1222 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1223 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1224
1225 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1226 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1227
1228 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1229 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1230}
1231
/// MAX_POOL_2D is handled by the shared pooling parser.
void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1236
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001237void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1238{
1239 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1240
1241 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1242 CHECK_VALID_SIZE(inputs.size(), 2);
1243
1244 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1245 CHECK_VALID_SIZE(outputs.size(), 1);
1246
1247 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1248 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1249
1250 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1251 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1252
1253 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1254 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1255
1256 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1257 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1258 {
1259 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1260 }
1261 else
1262 {
1263 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1264 }
1265
1266 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1267 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1268}
1269
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001270void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1271{
1272 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1273
1274 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1275 CHECK_VALID_SIZE(inputs.size(), 2);
1276
1277 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1278 CHECK_VALID_SIZE(outputs.size(), 1);
1279
1280 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1281 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1282
1283 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1284 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1285
1286 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1287 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1288
1289 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1290 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1291 {
1292 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1293 }
1294 else
1295 {
1296 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1297 }
1298
1299 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1300 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1301}
1302
/// Shared implementation for AVERAGE_POOL_2D and MAX_POOL_2D.
/// Builds a Pooling2dLayer from the operator's Pool2DOptions, then appends
/// any fused activation as a separate layer.
void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            // NOTE(review): in builds where ARMNN_ASSERT compiles out, this
            // falls through with an empty layerName — confirm intended.
            ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // Pooling has no dilation, hence the fixed 1u dilation arguments.
    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Must follow input-slot registration: a fused activation replaces the
    // layer whose outputs get registered below.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1374
/// Parses a TFLite SLICE operator and adds the corresponding Slice layer to the network.
/// Inputs: [0] the data tensor, [1] a constant 'begin' tensor, [2] a constant 'size' tensor.
void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    SliceDescriptor desc;

    // set begin tensor info for slice descriptor
    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    // Raw byte copy out of the flatbuffer. NOTE(review): assumes the buffer holds at
    // least GetNumBytes() of data whose element width matches 'unsigned int' — no
    // size or type check is performed here; relies on the model being well-formed.
    std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    // set size tensor info for slice descriptor
    armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    // Same raw-copy caveat as for 'begin' above.
    std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
    ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
    desc = SliceDescriptor(begin, size);

    auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1416
telsoa01c577f2c2018-08-31 09:22:23 +01001417void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1418{
1419 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1420 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1421 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1422
1423 SoftmaxDescriptor desc;
1424 desc.m_Beta = options->beta;
1425
1426 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1427 CHECK_VALID_SIZE(inputs.size(), 1);
1428 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1429 CHECK_VALID_SIZE(outputs.size(), 1);
1430
1431 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1432 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1433
1434 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1435 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1436
1437 // register the input connection slots for the layer, connections are made after all layers have been created
1438 // only the tensors for the inputs are relevant, exclude the const tensors
1439 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1440 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1441
1442 // register the output connection slots for the layer, connections are made after all layers have been created
1443 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1444 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1445}
1446
/// Parses a TFLite SPACE_TO_BATCH_ND operator and adds a SpaceToBatchNd layer.
/// Inputs: [0] the data tensor, [1] a constant block-shape tensor,
/// [2] a constant paddings tensor holding (before, after) pairs per spatial dim.
void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    // Raw byte copies out of the flatbuffer. NOTE(review): assumes the buffers hold
    // GetNumBytes() of data whose element width matches 'unsigned int' — no size
    // check is performed here; relies on the model being well-formed.
    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    // The flat paddings buffer alternates (before, after) values; fold consecutive
    // pairs into the descriptor's pad list.
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register only the data input; connections are made after all layers exist.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1494
telsoa01c577f2c2018-08-31 09:22:23 +01001495armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1496 const armnn::TensorInfo & inputTensorInfo)
1497{
1498 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1499 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1500 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1501
1502 if (inputTensorInfo.GetNumDimensions() > 4)
1503 {
1504 std::stringstream ss;
1505 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1506 << " shape:" << inputTensorInfo.GetShape() << " "
1507 << CHECK_LOCATION().AsString();
1508 throw ParseException(ss.str());
1509 }
1510
1511 if (squeezeDims.empty())
1512 {
1513 squeezeDims.assign(dimensionSequence,
1514 dimensionSequence+inputTensorInfo.GetNumDimensions());
1515 }
1516
1517 std::vector<uint32_t> outputDims;
1518 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1519 {
1520 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1521 auto currentDimension = inputTensorInfo.GetShape()[i];
1522 if (skipSqueeze || currentDimension != 1)
1523 {
1524 outputDims.push_back(currentDimension);
1525 }
1526 }
1527
1528 if (outputDims.size() > 4)
1529 {
1530 std::stringstream ss;
1531 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1532 << " shape:" << inputTensorInfo.GetShape() << " "
1533 << CHECK_LOCATION().AsString();
1534 throw ParseException(ss.str());
1535 }
1536
1537 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1538 outputDims.data());
1539
1540 // we need to preserve the tensor type and the quantization data as well
1541 TensorInfo outTensorInfo = inputTensorInfo;
1542 outTensorInfo.SetShape(outShape);
1543
1544 return outTensorInfo;
1545}
1546
1547void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1548{
1549 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1550
1551 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1552 CHECK_VALID_SIZE(inputs.size(), 1);
1553
1554 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1555 CHECK_VALID_SIZE(outputs.size(), 1);
1556
1557 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1558 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1559
1560 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1561 armnn::TensorInfo outputTensorInfo =
1562 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1563 inputTensorInfo);
1564
1565 ReshapeDescriptor reshapeDesc;
1566 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1567
1568 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1569 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1570 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1571
1572 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1573 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1574
1575 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1576 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1577}
1578
/// Parses a TFLite STRIDED_SLICE operator and adds a StridedSlice layer.
/// Inputs: [0] the data tensor, [1] begin, [2] end, [3] strides (all constant).
void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();

    // The TFLite bit-masks are copied straight into the descriptor.
    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Raw byte copies out of the flatbuffer below. NOTE(review): each assumes the
    // buffer holds GetNumBytes() of data whose element width matches 'int' — no
    // size check is performed here; relies on the model being well-formed.
    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register only the data input; connections are made after all layers exist.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1634
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001635void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1636{
1637 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1638
1639 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1640 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1641
1642 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1643 CHECK_VALID_SIZE(inputs.size(), 2);
1644
1645 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1646 CHECK_VALID_SIZE(outputs.size(), 1);
1647
1648 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1649 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1650
1651 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1652 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1653
1654 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1655 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1656
1657 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1658 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1659 {
1660 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1661 }
1662 else
1663 {
1664 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1665 }
1666
1667 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1668
1669 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1670 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1671}
1672
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301673void TfLiteParser::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
1674{
1675 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1676
1677 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1678 const auto * options = operatorPtr->builtin_options.AsDivOptions();
1679
1680 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1681 CHECK_VALID_SIZE(inputs.size(), 2);
1682
1683 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1684 CHECK_VALID_SIZE(outputs.size(), 1);
1685
1686 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1687 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1688
1689 auto layerName = boost::str(boost::format("Div:%1%:%2%") % subgraphIndex % operatorIndex);
1690 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
1691
1692 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1693 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1694
1695 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1696 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1697 {
1698 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1699 }
1700 else
1701 {
1702 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1703 }
1704
1705 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1706
1707 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1708 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1709}
1710
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001711void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1712{
1713 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1714
1715 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1716 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1717
1718 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1719 CHECK_VALID_SIZE(inputs.size(), 2);
1720
1721 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1722 CHECK_VALID_SIZE(outputs.size(), 1);
1723
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001724 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1725 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1726
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001727 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1728 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1729
1730 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1731 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1732
1733 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001734 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1735 {
1736 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1737 }
1738 else
1739 {
1740 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1741 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001742
1743 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1744
1745 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1746 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1747}
1748
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001749void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1750{
1751 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1752
1753 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1754 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1755
1756 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1757 CHECK_VALID_SIZE(inputs.size(), 2);
1758
1759 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1760 CHECK_VALID_SIZE(outputs.size(), 1);
1761
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001762 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1763 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1764
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001765 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1766 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1767
1768 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1769 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1770
1771 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001772 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1773 {
1774 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1775 }
1776 else
1777 {
1778 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1779 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001780
1781 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1782
1783 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1784 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1785}
1786
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001787void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1788{
1789 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1790
1791 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1792
1793 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1794 CHECK_VALID_SIZE(outputs.size(), 1);
1795
1796 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1797 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1798
1799 armnn::MeanDescriptor desc;
1800 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1801 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1802 desc.m_Axis = axis;
1803
1804 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1805 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1806
1807 desc.m_KeepDims =
1808 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1809 true : false;
1810
1811 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1812 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1813
1814 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1815
1816 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1817 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1818
1819 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1820 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1821}
1822
Darshan Patel83fcf982020-05-26 22:22:42 +05301823void TfLiteParser::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
1824{
1825 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1826
1827 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1828 CHECK_VALID_SIZE(inputs.size(), 1);
1829
1830 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1831 CHECK_VALID_SIZE(outputs.size(), 1);
1832
1833 auto layerName = boost::str(boost::format("Neg:%1%:%2%") % subgraphIndex % operatorIndex);
1834 armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Neg);
1835 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
1836 ARMNN_ASSERT(layer != nullptr);
1837
1838 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1839 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1840
1841 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1842 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1843
1844 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1845 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1846}
1847
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001848void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1849{
1850 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1851
1852 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1853
1854 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1855 CHECK_VALID_SIZE(outputs.size(), 1);
1856
1857 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1858 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1859
1860 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1861 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1862
1863 size_t step = 2;
1864 armnn::PadDescriptor desc;
1865 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1866 {
1867 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1868 }
1869
1870 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1871 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1872
1873 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1874 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1875
1876 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1877 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1878
1879 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1880 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1881}
1882
Sadik Armagan66dedc72019-12-10 16:32:07 +00001883void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
1884{
1885 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1886
1887 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1888 CHECK_VALID_SIZE(inputs.size(), 1);
1889
1890 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1891 CHECK_VALID_SIZE(outputs.size(), 1);
1892
1893 auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
1894
1895 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001896 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001897
1898 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1899 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1900
1901 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1902 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1903
1904 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1905 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1906}
Finn Williamsc42c3842019-01-22 14:18:11 +00001907
Sadik Armagan58f39192018-09-17 14:14:39 +01001908void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1909{
Finn Williamsc42c3842019-01-22 14:18:11 +00001910 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001911}
1912
1913void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1914{
Finn Williamsc42c3842019-01-22 14:18:11 +00001915 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1916}
Sadik Armagan58f39192018-09-17 14:14:39 +01001917
Sadik Armagan12239e72020-05-27 11:06:17 +01001918void TfLiteParser::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
1919{
1920 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::LeakyReLu);
1921}
1922
Finn Williamsc42c3842019-01-22 14:18:11 +00001923void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1924{
1925 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1926}
1927
Nina Drozd99851762019-04-09 09:37:38 +01001928void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1929{
1930 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1931}
1932
Finn Williamsc42c3842019-01-22 14:18:11 +00001933
/// Shared implementation behind the per-operator activation parsers
/// (ParseRelu, ParseRelu6, ParseLeakyRelu, ParseLogistic, ParseTanH).
/// Adds an Activation layer configured for @p activationType.
/// @throws ParseException for an activation type this parser does not handle.
void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    // operatorPtr is only read in the LeakyReLu branch below; suppress the
    // unused-variable warning for the other activation types.
    IgnoreUnused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = str(boost::format("Activation:"));
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    // Each case appends the operator-specific suffix to the layer name and sets
    // the descriptor parameters m_A/m_B where the activation needs them.
    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
            // Bounded ReLU clamps to [m_B, m_A] = [0, 6] for RELU6.
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        case ActivationFunction::TanH:
        {
            layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        case ActivationFunction::LeakyReLu:
        {
            layerName += str(boost::format("LEAKYRELU:%1%:%2%") % subgraphIndex % operatorIndex);
            // The negative-slope coefficient comes from the operator's options.
            const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
            activationDesc.m_A = options->alpha;
            break;
        }
        default:
        {
            throw ParseException(
                boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
                                         " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
/// Computes the output TensorInfo of a RESHAPE operation.
/// @param inputTensorInfo info of the tensor being reshaped.
/// @param targetDimsIn    requested target dimensions; at most one entry may be -1,
///                        meaning "infer this dimension from the total element count".
/// @return a TensorInfo with the new shape; type and quantization are preserved.
/// @throws ParseException if more than one target dimension is -1.
armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
                                                     const std::vector<int32_t> & targetDimsIn)
{
    // A -1 entry converts to a huge unsigned value here, but it is overwritten
    // with the inferred extent before outputDims is used.
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                boost::str(
                    boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
        }

        // Product over all entries with an initial value of -1: the single -1
        // stretch entry cancels the negative initial value, leaving the product
        // of the remaining (fixed) dimensions.
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        // The stretch dimension is what remains after dividing out the fixed ones.
        // NOTE(review): assumes the element count divides evenly — not checked here.
        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    // Preserve data type and quantization parameters; only the shape changes.
    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
2035
// Parses a TfLite RESHAPE operator and adds an ArmNN Reshape layer to the network.
// The target shape may come from the operator's builtin ReshapeOptions, from a
// constant second input tensor, or from both (in which case they must agree).
void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();

    armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
    // Shape recorded in the model's output tensor; used below only to validate
    // that the computed reshape matches what the model declares.
    armnn::TensorInfo actualOutputTensorInfo  = ToTensorInfo(outputs[0]);

    std::vector<int32_t> targetShape;
    if (inputs.size() > 1 && inputs[1] != nullptr)
    {
        // Second input holds the target shape; it must be a constant 1D int32 tensor.
        if (inputs[1]->is_variable)
        {
            ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
        }

        if (inputs[1]->shape.size() != 1)
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
        }

        if (inputs[1]->type != tflite::TensorType_INT32)
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
        }

        // Read the raw int32 dimension values out of the constant buffer.
        auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
        auto vals = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
        for (int i=0; i < inputs[1]->shape[0]; i++)
        {
            targetShape.push_back(vals[i]);
        }

        // If the builtin options also carry a shape, it must match the input tensor's.
        if (options != nullptr &&
            options->new_shape.empty() == false &&
            options->new_shape != targetShape)
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape defined in reshape parameters and as input tensor but "
                                        "the values do not match");
        }
    }
    else
    {
        // No shape input tensor: the builtin options are the only possible source.
        if (options == nullptr)
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }

        targetShape = options->new_shape;
    }

    // Resolve any -1 wildcard dimension against the input element count.
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    // Only the data tensor (input 0) is registered; the shape tensor is a constant.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2125
// Handles the RESIZE_BILINEAR operator by delegating to the common resize parser.
void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
}
2130
// Handles the RESIZE_NEAREST_NEIGHBOR operator by delegating to the common resize parser.
void TfLiteParser::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
}
2135
2136void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
2137{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002138 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2139
2140 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2141 CHECK_VALID_SIZE(inputs.size(), 2);
2142
2143 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2144 CHECK_VALID_SIZE(outputs.size(), 1);
2145
2146 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
2147
2148 // Data for the parsed tensor args (size) must be stored locally.
2149 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2150
2151 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2152 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2153
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002154 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002155 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002156 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002157 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2158 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002159
Sadik Armagana3b31f02019-12-05 09:08:53 +00002160 auto layerName = str(boost::format("Resize:"));
2161
2162 switch (resizeMethod)
2163 {
2164 case ResizeMethod::Bilinear:
2165 {
2166 layerName += str(boost::format("BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002167
2168 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2169 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2170
David Monahan4a0c9b92020-05-30 09:48:39 +01002171 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002172 break;
2173 }
2174 case ResizeMethod::NearestNeighbor:
2175 {
2176 layerName += str(boost::format("NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
2177 break;
2178 }
2179 default:
2180 {
2181 throw ParseException(
2182 boost::str(boost::format("Unexpected ResizeMethod[%1%] when creating layerName "
2183 " %2% ") %static_cast<int>(resizeMethod)% CHECK_LOCATION().AsString()));
2184 }
2185 }
2186
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002187 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002188
2189 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2190 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2191
2192 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2193 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2194
2195 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2196 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2197}
2198
Sadik Armagan479045b2018-10-01 11:51:37 +01002199void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
2200{
2201 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2202
2203 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2204 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
2205
2206 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2207
2208 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2209 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2210 CHECK_VALID_SIZE(outputs.size(), 1);
2211
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002212 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
2213 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01002214
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002215 const unsigned int concatDimInput = static_cast<unsigned int>(
2216 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01002217
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002218 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2219 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01002220
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002221 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01002222
2223 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2224 {
2225 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
2226
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002227 // This set up concatDescriptor view origin
2228 armnnUtils::ProcessConcatInputTensorInfo(
2229 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01002230 }
2231
2232 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01002233 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01002234
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002235 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan479045b2018-10-01 11:51:37 +01002236
2237 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2238 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01002239
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002240 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01002241
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002242 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01002243
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002244 // add fused activation layer
2245 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01002246
Sadik Armagan479045b2018-10-01 11:51:37 +01002247 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2248 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2249}
2250
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002251void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
2252{
2253 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2254
2255 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2256 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2257
2258 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2259
2260 FullyConnectedDescriptor desc;
2261 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002262 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002263
2264 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2265 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2266 CHECK_VALID_SIZE(outputs.size(), 1);
2267
2268 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2269
2270 // Fully Connected Layer accepts two dimensional weights input
2271 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2272 if (weightsDimension != 2)
2273 {
2274 throw ParseException(
2275 boost::str(
2276 boost::format(
2277 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
2278 "Node %2%")
2279 % weightsDimension
2280 % CHECK_LOCATION().AsString()));
2281 }
2282
Matteo Martincigh747ef822018-12-18 09:26:39 +00002283 auto filterTensorAndData = CreateConstTensor(inputs[1],
2284 filterTensorInfo,
2285 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002286 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002287 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
2288
2289 if (inputs.size() == 3)
2290 {
2291 desc.m_BiasEnabled = true;
2292 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002293 auto biasTensorAndData = CreateConstTensor(inputs[2],
2294 biasTensorInfo,
2295 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002296 layer = m_Network->AddFullyConnectedLayer(desc,
2297 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002298 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002299 layerName.c_str());
2300 }
2301 else
2302 {
2303 layer = m_Network->AddFullyConnectedLayer(desc,
2304 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002305 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002306 layerName.c_str());
2307 }
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002308 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002309
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002310 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2311
2312 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2313
2314 if (inputTensorInfo.GetNumDimensions() > 2)
2315 {
2316 // Add reshape to flatten to 2D [batch_size, input_size],
2317 // where "input_size" corresponds to the number of inputs to the layer,
2318 // matching the second dimension of weights,
2319 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2320 std::vector<unsigned int> reshapedDimensions(2);
2321 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2322 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2323
2324 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2325 {
2326 throw ParseException(
2327 boost::str(
2328 boost::format(
2329 "Failed to deduce input tensor shape from filter size %1%")
2330 % reshapedDimensions[1]
2331 % CHECK_LOCATION().AsString()));
2332 }
2333
2334 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2335 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2336
2337 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2338 armnn::ReshapeDescriptor desc;
2339 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2340 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2341
2342 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2343 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2344
2345 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2346 }
2347 else
2348 {
2349 // register the input connection slot for the layer
2350 // only the tensors for the inputs are relevant, exclude the const tensors
2351 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2352 }
2353
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002354 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2355 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2356
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002357 // we need to add the activation layer and fortunately we don't need to care about the data layout
2358 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2359 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002360
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002361 // register the output connection slots for the layer, connections are made after all layers have been created
2362 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2363 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2364}
2365
// Parses the TFLite_Detection_PostProcess custom operator. The operator's
// parameters arrive as a flexbuffer map in custom_options; input 2 is a
// constant anchors tensor, and the four outputs (boxes, classes, scores,
// num_detections) have shapes derived from the descriptor rather than the model.
void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections           = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection  = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold       = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold         = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses              = m["num_classes"].AsUInt32();
    desc.m_ScaleH                  = m["h_scale"].AsFloat();
    desc.m_ScaleW                  = m["w_scale"].AsFloat();
    desc.m_ScaleX                  = m["x_scale"].AsFloat();
    desc.m_ScaleY                  = m["y_scale"].AsFloat();

    // These two keys are optional in the flexbuffer map; keep the descriptor's
    // defaults when they are absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms       = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass  = m["detections_per_class"].AsUInt32();
    }

    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Input 2 holds the anchor boxes as a constant tensor.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
                                                                       layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });  // detection boxes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });     // detection classes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });     // detection scores
    m_OverridenOutputShapes.push_back({ 1 });                     // number of detections

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
2443
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002444/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2445void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2446{
2447 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2448
2449 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2450 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2451 CHECK_VALID_SIZE(outputs.size(), 1);
2452
2453 if (inputs.size() < 1)
2454 {
2455 throw ParseException("Pack must have at least one input.");
2456 }
2457
2458 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2459 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2460
2461 StackDescriptor desc;
2462 desc.m_Axis = static_cast<uint32_t>(options->axis);
2463 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2464
2465 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2466 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2467 desc.m_InputShape = inputTensorInfo.GetShape();
2468
2469 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2470 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2471
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002472 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002473
2474 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2475 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2476
2477 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2478 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2479
2480 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2481 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2482}
2483
Nina Drozd200e3802019-04-15 09:47:39 +01002484void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2485{
2486 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2487
2488 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2489 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2490
2491 // This unpackAxis indicates the axis to unpack
2492 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2493
2494 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2495 CHECK_VALID_SIZE(inputs.size(), 1);
2496
2497 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002498
2499 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2500 {
2501 throw ParseException(
2502 boost::str(
2503 boost::format(
2504 "The unpack axis: %1% cannot be greater than or equal to "
2505 "the number of input dimension %2% %3%")
2506 % unpackAxis
2507 % inputTensorInfo.GetNumDimensions()
2508 % CHECK_LOCATION().AsString()));
2509 }
2510
Nina Drozd200e3802019-04-15 09:47:39 +01002511 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2512 // If num is not defined, automatically infer from the length of the dimension axis.
2513 if(unpackNum == 0)
2514 {
2515 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2516 }
2517
2518 // If unpack number cannot be inferred and is still zero, throw ParseException.
2519 if(unpackNum == 0)
2520 {
2521 throw ParseException("Number to unpack must greater than zero.");
2522 }
2523
2524 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2525 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2526
2527 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2528 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2529
2530 // Add current input shape to unpackDimSizes
2531 for (unsigned int i = 0; i < inputDimSize; ++i)
2532 {
2533 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2534 }
2535
2536 if (unpackDimSizes[unpackAxis] != unpackNum)
2537 {
2538 throw ParseException("Number to unpack must be the same as length of the dimension to "
2539 "unpack along.");
2540 }
2541
2542 unpackDimSizes[unpackAxis] /= unpackNum;
2543
2544 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2545 for (unsigned int j = 0; j < unpackNum; ++j)
2546 {
2547 // Set the size of the views.
2548 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2549 {
2550 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2551 }
2552 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2553 }
2554
2555 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2556 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2557
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002558 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2559 unpackDimSizes.data());
2560
Nina Drozd200e3802019-04-15 09:47:39 +01002561 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2562 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2563
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002564 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2565 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2566 {
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002567 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002568 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2569 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002570 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002571 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2572
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002573 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2574 outputTensorInfo.GetDataType(),
2575 outputTensorInfo.GetQuantizationScale(),
2576 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002577 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2578
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002579 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002580
2581 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2582 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2583 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2584 }
Nina Drozd200e3802019-04-15 09:47:39 +01002585}
2586
// Parses a TfLite SPLIT operator and adds an ArmNN Splitter layer.
// Note the TfLite input order: input 0 is the (constant) split axis tensor,
// input 1 is the tensor to split.
void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSplitOptions();

    const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);

    // If number of splits cannot be inferred and is zero, throw ParseException.
    if(numSplits == 0)
    {
        throw ParseException("Number to splits must greater than zero.");
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), numSplits);

    // inputs[1] is the data tensor, inputs[0] the axis tensor (see note above).
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);

    // Read the scalar split axis out of the constant buffer.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
    std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());

    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
    const unsigned int splitDim = axisData[0];

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        throw ParseException(
            boost::str(
                boost::format(
                    "The number of dimensions: %1% for input tensors of the "
                    "split op cannot be greater than %2% %3%")
                % inputTensorInfo.GetNumDimensions()
                % MaxNumOfTensorDimensions
                % CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    // SPLIT divides the axis into equal parts, so it must divide evenly.
    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    splitterDimSizes[splitDim] /= numSplits;

    // Each view has the reduced size on the split axis, offset by its index.
    SplitterDescriptor splitDesc(numSplits, inputDimSize);
    for (unsigned int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

    auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());

    // Only the data tensor (index 1) is registered; the axis tensor is a constant.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
        layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2670
Derek Lambertif0176992020-04-28 13:37:49 +01002671unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
2672{
2673 int numDims = armnn::numeric_cast<int>(numDimsIn);
2674 int v = idx < 0 ? numDims + idx : idx;
2675 ARMNN_ASSERT(v >= 0);
2676 ARMNN_ASSERT(v < numDims);
2677
2678 return static_cast<unsigned int>(v);
2679}
2680
2681void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
2682{
2683 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2684
2685 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Ryan OShea86704732020-05-26 11:41:04 +01002686 const auto * options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01002687
2688 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2689 CHECK_VALID_SIZE(inputs.size(), 3);
2690
2691 auto& inputTensor = inputs[0];
2692 auto& splitsTensor = inputs[1];
2693 auto& axisTensor = inputs[2];
2694
2695 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
2696 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
2697 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
2698 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
2699
2700 // Inputs
2701 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2702 if (inputDimSize > MaxNumOfTensorDimensions)
2703 {
2704 throw ParseException(
2705 boost::str(
2706 boost::format(
2707 "The number of dimensions: %1% for input tensors of the "
2708 "split op cannot be greater than %2% %3%")
2709 % inputTensorInfo.GetNumDimensions()
2710 % MaxNumOfTensorDimensions
2711 % CHECK_LOCATION().AsString()));
2712 }
2713
2714 // Get split axis
2715 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
2716 std::vector<int> axisData(axisTensorInfo.GetNumElements());
2717 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2718 const unsigned int splitDim = ComputeWrappedIndex(axisData[0], inputTensorInfo.GetNumDimensions());
2719
Derek Lambertif0176992020-04-28 13:37:49 +01002720 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01002721 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Derek Lambertif0176992020-04-28 13:37:49 +01002722 std::vector<int> splitsData(0);
Ryan OShea86704732020-05-26 11:41:04 +01002723 unsigned int numSplits{0};
2724
2725 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01002726 {
2727 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01002728 }
2729 else
2730 {
Ryan OShea86704732020-05-26 11:41:04 +01002731 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01002732 }
2733
2734 if (numSplits <=0)
2735 {
2736 throw ParseException("SplitV has invalid number of splits");
2737 }
2738
Ryan OShea86704732020-05-26 11:41:04 +01002739 splitsData.resize(numSplits);
2740 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
2741 unsigned int idx{0};
2742
2743 for(auto& split: splitsData)
2744 {
2745 split = splitsBufferPtr->data[idx];
2746 idx++;
2747 }
2748
2749 idx = 0;
2750 int numInferred{0};
2751 unsigned int inferIdx{0};
2752 int splitSum{0};
2753 for (auto split : splitsData)
2754 {
2755 if (split < 0)
2756 {
2757 numInferred++;
2758 inferIdx = idx;
2759 }
2760 else
2761 {
2762 splitSum += split;
2763 }
2764 idx++;
2765 }
2766 // Check for inferred Axis
2767 if (numInferred == 0)
2768 {
2769 if (splitSum != numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
2770 {
2771 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
2772 }
2773 }
2774 else if (numInferred == 1)
2775 {
2776 splitsData[inferIdx] = numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
2777 }
2778 else
2779 {
2780 throw ParseException("Cannot infer split size for more than one split");
2781 }
2782
Derek Lambertif0176992020-04-28 13:37:49 +01002783 //Ouput size validation
2784 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2785 CHECK_VALID_SIZE(outputs.size(), numSplits);
2786
2787 // Setup Armnn descriptor
2788 SplitterDescriptor splitDesc(numSplits, inputDimSize);
2789 unsigned int accumSplit = 0;
2790 for (unsigned int j = 0; j < numSplits; ++j)
2791 {
2792 unsigned int splitSize = numeric_cast<unsigned int>(splitsData[j]);
2793
2794 // Set the size of the views.
2795 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
2796 {
2797 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
2798 if (dimIdx == splitDim)
2799 {
2800 dimSize = splitSize;
2801 }
2802 splitDesc.SetViewSize(j, dimIdx, dimSize);
2803 }
2804
2805 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
2806 accumSplit += splitSize;
2807 }
2808
Ryan OShea86704732020-05-26 11:41:04 +01002809 auto layerName = boost::str(boost::format("SplitV:%1%:%2%") % subgraphIndex % operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01002810 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2811
2812 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2813 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2814
2815 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2816 {
2817 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
2818 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
2819 }
2820
2821 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2822 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2823}
2824
Sadik Armagan58f39192018-09-17 14:14:39 +01002825armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2826 unsigned int outputSlot,
2827 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002828{
2829 ActivationDescriptor activationDesc;
2830 std::string layerName = prevLayer->GetName();
2831
2832 switch(activationType)
2833 {
2834 case tflite::ActivationFunctionType_NONE:
2835 {
2836 // this is a no-op: return previous layer
2837 return prevLayer;
2838 }
2839 case tflite::ActivationFunctionType_RELU:
2840 {
2841 activationDesc.m_Function = ActivationFunction::ReLu;
2842 layerName += ":RELU";
2843 break;
2844 }
2845 case tflite::ActivationFunctionType_RELU6:
2846 {
2847 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2848 activationDesc.m_A = 6.0f;
2849 activationDesc.m_B = 0.0f;
2850 layerName += ":RELU6";
2851 break;
2852 }
2853 case tflite::ActivationFunctionType_TANH:
2854 {
2855 activationDesc.m_Function = ActivationFunction::TanH;
2856 activationDesc.m_A = 1.0f;
2857 activationDesc.m_B = 1.0f;
2858 layerName += ":TANH";
2859 break;
2860 }
2861
2862 // I only put these here as a reminder what others we could support
2863 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2864 case tflite::ActivationFunctionType_SIGN_BIT:
2865 default:
2866 {
2867 throw ParseException(
2868 boost::str(
2869 boost::format("TfLite parser doesn't suppport fused activation: "
2870 "%1%/%2% %3% ") %
2871 activationType %
2872 tflite::EnumNameActivationFunctionType(activationType) %
2873 CHECK_LOCATION().AsString()));
2874
2875 }
2876 }
2877
2878 IConnectableLayer* activationLayer =
2879 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2880
2881 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2882 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2883 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2884 return activationLayer;
2885}
2886
2887TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2888{
2889 if (fileName == nullptr)
2890 {
2891 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2892 CHECK_LOCATION().AsString()));
2893 }
2894 boost::system::error_code errorCode;
2895 boost::filesystem::path pathToFile(fileName);
2896 if (!boost::filesystem::exists(pathToFile, errorCode))
2897 {
Derek Lambertic9e52792020-03-11 11:42:26 +00002898 std::string locationString = CHECK_LOCATION().AsString();
2899 std::string msg = boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2900 fileName %
2901 errorCode %
2902 locationString);
2903 throw FileNotFoundException(msg);
telsoa01c577f2c2018-08-31 09:22:23 +01002904 }
2905 std::ifstream file(fileName, std::ios::binary);
2906 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2907 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2908 fileContent.size());
2909}
2910
2911TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2912{
2913 if (binaryContent == nullptr)
2914 {
2915 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2916 CHECK_LOCATION().AsString()));
2917 }
2918 flatbuffers::Verifier verifier(binaryContent, len);
2919 if (verifier.VerifyBuffer<tflite::Model>() == false)
2920 {
2921 throw ParseException(
2922 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2923 "flatbuffers format. size:%1% %2%") %
2924 len %
2925 CHECK_LOCATION().AsString()));
2926 }
2927 return tflite::UnPackModel(binaryContent);
2928}
2929
2930TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2931 size_t subgraphIndex,
2932 size_t operatorIndex)
2933{
2934 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2935
Derek Lambertiff05cc52019-04-26 13:05:17 +01002936 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2937 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002938
2939 size_t inputCount = operatorPtr->inputs.size();
2940 TensorRawPtrVector result(inputCount);
2941 for (size_t i=0; i<inputCount; ++i)
2942 {
2943 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002944 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002945 }
2946 return result;
2947}
2948
2949TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2950 size_t subgraphIndex,
2951 size_t operatorIndex)
2952{
2953 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2954
Derek Lambertiff05cc52019-04-26 13:05:17 +01002955 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2956 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002957
2958 size_t outputCount = operatorPtr->outputs.size();
2959 TensorRawPtrVector result(outputCount);
2960 for (size_t i=0; i<outputCount; ++i)
2961 {
2962 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2963 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002964 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002965 }
2966 return result;
2967}
2968
2969TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2970 size_t subgraphIndex)
2971{
2972 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002973 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002974
Derek Lambertiff05cc52019-04-26 13:05:17 +01002975 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002976 TensorIdRawPtrVector result(inputCount);
2977 for (size_t i=0; i<inputCount; ++i)
2978 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002979 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002980 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002981 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002982 }
2983 return result;
2984}
2985
2986TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2987 size_t subgraphIndex)
2988{
2989 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002990 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002991
Derek Lambertiff05cc52019-04-26 13:05:17 +01002992 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002993 TensorIdRawPtrVector result(outputCount);
2994 for (size_t i=0; i<outputCount; ++i)
2995 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002996 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2997 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002998 }
2999 return result;
3000}
3001
3002std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
3003 size_t subgraphIndex,
3004 size_t operatorIndex)
3005{
3006 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003007 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3008 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003009 return operatorPtr->inputs;
3010}
3011
3012std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
3013 size_t subgraphIndex,
3014 size_t operatorIndex)
3015{
3016 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003017 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3018 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003019 return operatorPtr->outputs;
3020}
3021
3022void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
3023 size_t operatorIndex,
3024 IConnectableLayer* layer,
3025 const std::vector<unsigned int>& tensorIndexes)
3026{
3027 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003028 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003029 if (tensorIndexes.size() != layer->GetNumInputSlots())
3030 {
3031 throw ParseException(
3032 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
3033 " for subgraph:%3% operator index:%4% %5%") %
3034 tensorIndexes.size() %
3035 layer->GetNumInputSlots() %
3036 subgraphIndex %
3037 operatorIndex %
3038 CHECK_LOCATION().AsString()));
3039 }
3040
3041 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
3042 {
3043 unsigned int tensorIndex = tensorIndexes[slotIndex];
3044 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
3045 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
3046 }
3047}
3048
3049void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
3050 size_t operatorIndex,
3051 IConnectableLayer* layer,
3052 const std::vector<unsigned int>& tensorIndexes)
3053{
3054 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003055 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003056 if (tensorIndexes.size() != layer->GetNumOutputSlots())
3057 {
3058 throw ParseException(
3059 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
3060 " for subgraph:%3% operator index:%4% %5%") %
3061 tensorIndexes.size() %
3062 layer->GetNumOutputSlots() %
3063 subgraphIndex %
3064 operatorIndex %
3065 CHECK_LOCATION().AsString()));
3066 }
3067
3068 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
3069 {
3070 unsigned int tensorIndex = tensorIndexes[slotIndex];
3071 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
3072 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3073 }
3074}
3075
3076void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
3077{
3078 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3079
3080 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
3081 for (auto const & tensorIdAndPtr : inputs)
3082 {
3083 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3084 IConnectableLayer* layer =
3085 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3086
3087 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
3088 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3089
3090 RegisterOutputSlots(subgraphIndex,
3091 VIRTUAL_OPERATOR_ID,
3092 layer,
3093 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3094 }
3095}
3096
3097void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
3098{
3099 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3100
3101 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
3102 for (auto const & tensorIdAndPtr : outputs)
3103 {
3104 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3105 IConnectableLayer* layer =
3106 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3107
3108 RegisterInputSlots(subgraphIndex,
3109 VIRTUAL_OPERATOR_ID,
3110 layer,
3111 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3112 }
3113}
3114
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003115void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
3116{
3117 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3118
Derek Lambertiff05cc52019-04-26 13:05:17 +01003119 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003120 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
3121 {
3122 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
3123 {
3124 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
3125 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
3126 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003127 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003128 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
3129 auto tensorAndData = CreateConstTensor(tensorPtr,
3130 tensorInfo,
3131 armnn::Optional<armnn::PermutationVector&>());
3132
3133 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
3134 IConnectableLayer *layer =
3135 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
3136
3137 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3138 RegisterOutputSlots(subgraphIndex,
3139 VIRTUAL_OPERATOR_ID,
3140 layer,
3141 { tensorIndex });
3142
3143 }
3144 }
3145 }
3146}
3147
// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
/// Returns the raw data buffer at bufferIndex, after bounds-checking the index
/// against the model's buffer list.
TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
{
    CHECK_BUFFER(model, bufferIndex);
    return model->buffers[bufferIndex].get();
}
3154
Matteo Martincigh747ef822018-12-18 09:26:39 +00003155template<typename T>
3156std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
3157TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
3158 TfLiteParser::TensorRawPtr tensorPtr,
3159 armnn::TensorInfo& tensorInfo,
3160 armnn::Optional<armnn::PermutationVector&> permutationVector)
3161{
3162 auto constData = CreateConstTensorImpl<T>(bufferPtr,
3163 tensorPtr,
3164 tensorInfo,
3165 permutationVector);
3166 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
3167 return std::make_pair(constData.first, std::move(storage));
3168}
3169
telsoa01c577f2c2018-08-31 09:22:23 +01003170std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
3171TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00003172 armnn::TensorInfo& tensorInfo,
3173 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01003174{
3175 CHECK_TENSOR_PTR(tensorPtr);
3176 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3177 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3178
3179 switch (tensorInfo.GetDataType())
3180 {
3181 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003182 return CreateConstTensorAndStoreData<float>(bufferPtr,
3183 tensorPtr,
3184 tensorInfo,
3185 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00003186 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003187 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
3188 tensorPtr,
3189 tensorInfo,
3190 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00003191 case armnn::DataType::QSymmS8:
3192 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3193 tensorPtr,
3194 tensorInfo,
3195 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00003196 case armnn::DataType::QAsymmS8:
3197 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3198 tensorPtr,
3199 tensorInfo,
3200 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003201 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003202 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
3203 tensorPtr,
3204 tensorInfo,
3205 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003206 default:
3207 {
3208 std::stringstream errString;
3209 errString << "Unexpected datatype when creating const tensor: "
3210 << armnn::GetDataTypeName(tensorInfo.GetDataType())
3211 << " shape:" << tensorInfo.GetShape()
3212 << CHECK_LOCATION().AsString();
3213 throw ParseException(errString.str());
3214 }
3215 }
3216}
3217
3218BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
3219 const std::string& name) const
3220{
3221 CHECK_SUBGRAPH(m_Model, subgraphId);
3222 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3223 for (auto const & input : inputs)
3224 {
3225 if (input.second->name == name)
3226 {
3227 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3228 return std::make_pair(bindingId, ToTensorInfo(input.second));
3229 }
3230 }
3231
3232 std::stringstream bindings;
3233 for (auto const & input : inputs)
3234 {
3235 bindings << "'" << input.second->name << "' ";
3236 }
3237
3238 throw ParseException(
3239 boost::str(
3240 boost::format("No input binding found for subgraph:%1% and name:%2%. "
3241 "Possible inputs are: [%3%] %4%") %
3242 subgraphId %
3243 name %
3244 bindings.str() %
3245 CHECK_LOCATION().AsString()));
3246}
3247
3248BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
3249 const std::string& name) const
3250{
3251 CHECK_SUBGRAPH(m_Model, subgraphId);
3252 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003253 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01003254 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003255 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01003256 if (output.second->name == name)
3257 {
3258 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003259 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3260 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3261 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01003262 }
3263 }
3264
3265 std::stringstream bindings;
3266 for (auto const & output : outputs)
3267 {
3268 bindings << "'" << output.second->name << "' ";
3269 }
3270
3271 throw ParseException(
3272 boost::str(
3273 boost::format("No output binding found for subgraph:%1% and name:%2%. "
3274 "Possible outputs are: [%3%] %4%") %
3275 subgraphId %
3276 name %
3277 bindings.str() %
3278 CHECK_LOCATION().AsString()));
3279}
3280
/// Returns the number of subgraphs in the loaded model.
size_t TfLiteParser::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}
3285
3286std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
3287{
3288 CHECK_SUBGRAPH(m_Model, subgraphId);
3289 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3290 std::vector<std::string> result;
3291 result.reserve(inputs.size());
3292 for (auto const & input : inputs)
3293 {
3294 result.push_back(input.second->name);
3295 }
3296 return result;
3297}
3298
3299std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
3300{
3301 CHECK_SUBGRAPH(m_Model, subgraphId);
3302 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3303 std::vector<std::string> result;
3304 result.reserve(outputs.size());
3305 for (auto const & output : outputs)
3306 {
3307 result.push_back(output.second->name);
3308 }
3309 return result;
3310}
3311
/// Creates a parser instance as a raw pointer; the caller is responsible for
/// releasing it via ITfLiteParser::Destroy.
ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
{
    return new TfLiteParser(options);
}
3316
/// Creates a parser instance wrapped in a smart pointer whose deleter calls
/// ITfLiteParser::Destroy.
ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
{
    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
}
3321
/// Destroys a parser previously obtained from CreateRaw (or via Create's deleter).
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
3326
// SupportedDataStorage keeps the parser-owned copy of constant tensor data
// alive for the lifetime of the parsed network. Each constructor takes
// ownership of one typed buffer and leaves the other pointers null.

/// Takes ownership of Float32 constant data.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}

/// Takes ownership of QAsymmU8 constant data.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}

/// Takes ownership of signed 8-bit quantized constant data.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}

/// Takes ownership of Signed32 constant data.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
3358
3359} // armnnTfLiteParser