blob: f5c01f249a54135ed7733de515eaaa63fcf030ba [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Bentham39ef3e52020-01-20 10:09:09 +00008#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +01009#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000010#include <armnn/Logging.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010011#include <armnn/TypesUtils.hpp>
12#include <boost/filesystem.hpp>
13
14// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000015#include <armnnUtils/Permute.hpp>
16
Sadik Armagan479045b2018-10-01 11:51:37 +010017#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010018#include <VerificationHelpers.hpp>
19
20// The generated code based on the Tf Lite schema:
21#include <schema_generated.h>
22
Matteo Martincighe011d202019-11-28 11:35:47 +000023#include <flatbuffers/flexbuffers.h>
24
telsoa01c577f2c2018-08-31 09:22:23 +010025#include <boost/core/ignore_unused.hpp>
26#include <boost/assert.hpp>
27#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010028#include <boost/numeric/conversion/cast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010029
30#include <fstream>
31#include <algorithm>
32#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010033#include <numeric>
telsoa01c577f2c2018-08-31 09:22:23 +010034
35using namespace armnn;
36using armnn::CheckLocation;
37namespace armnnTfLiteParser
38{
39namespace
40{
jimfly01c25411c2018-11-14 17:47:22 +000041
telsoa01c577f2c2018-08-31 09:22:23 +010042const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
43
44void CheckSubgraph(const TfLiteParser::ModelPtr & model,
45 size_t subgraphIndex,
46 const CheckLocation & location)
47{
48 if (model.get() == nullptr)
49 {
50 throw ParseException(
51 boost::str(
52 boost::format("%1% was called with invalid (null) model. "
53 "Possible reason is that the model is not yet loaded and Unpack(ed). "
54 "subgraph:%2% at %3%") %
55 location.m_Function %
56 subgraphIndex %
57 location.FileLine()));
58 }
59 else if (subgraphIndex >= model->subgraphs.size())
60 {
61 throw ParseException(
62 boost::str(
63 boost::format("%1% was called with an invalid subgraph index. "
64 "subgraph:%2% at %3%") %
65 location.m_Function %
66 subgraphIndex %
67 location.FileLine()));
68 }
69}
70
71#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
72 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
73
74void CheckModel(const TfLiteParser::ModelPtr & model,
75 size_t subgraphIndex,
76 size_t operatorIndex,
77 const CheckLocation & location)
78{
79 if (model.get() == nullptr)
80 {
81 throw ParseException(
82 boost::str(
83 boost::format("%1% was called with invalid (null) model. "
84 "Possible reason is that the model is not yet loaded and Unpack(ed). "
85 "subgraph:%2% operator:%3% at %4%") %
86 location.m_Function %
87 subgraphIndex %
88 operatorIndex %
89 location.FileLine()));
90 }
91 else if (subgraphIndex >= model->subgraphs.size())
92 {
93 throw ParseException(
94 boost::str(
95 boost::format("%1% was called with an invalid subgraph index. "
96 "subgraph:%2% operator:%3% at %4%") %
97 location.m_Function %
98 subgraphIndex %
99 operatorIndex %
100 location.FileLine()));
101 }
102 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
103 operatorIndex != VIRTUAL_OPERATOR_ID)
104 {
105 throw ParseException(
106 boost::str(
107 boost::format("%1% was called with an invalid operator index. "
108 "subgraph:%2% operator:%3% at %4%") %
109 location.m_Function %
110 subgraphIndex %
111 operatorIndex %
112 location.FileLine()));
113 }
114}
115
116#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
117 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
118
119void CheckTensor(const TfLiteParser::ModelPtr & model,
120 size_t subgraphIndex,
121 size_t tensorIndex,
122 const CheckLocation & location)
123{
124 // not checking model, because I assume CHECK_MODEL already run
125 // and checked that. An assert would do.
126 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
127
128 // also subgraph index should be checked by CHECK_MODEL so
129 // I only add an assert here
130 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
131
132 // the tensor index is the only one to check here
133 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
134 {
135 throw ParseException(
136 boost::str(
137 boost::format("%1% was called with an invalid tensor index. "
138 "subgraph:%2% tensor:%3% at %4%") %
139 location.m_Function %
140 subgraphIndex %
141 tensorIndex %
142 location.FileLine()));
143 }
144}
145
146#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
147 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
148
149void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
150 const CheckLocation & location)
151{
152 if (rawPtr == nullptr)
153 {
154 throw ParseException(
155 boost::str(
156 boost::format("%1% was called with a null tensor pointer. "
157 "at %2%") %
158 location.m_Function %
159 location.FileLine()));
160
161 }
162}
163
164#define CHECK_TENSOR_PTR(TENSOR_PTR) \
165 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
166
167void CheckBuffer(const TfLiteParser::ModelPtr & model,
168 size_t bufferIndex,
169 const CheckLocation & location)
170{
171 if (model.get() == nullptr)
172 {
173 throw ParseException(
174 boost::str(
175 boost::format("%1% was called with invalid (null) model. "
176 "Possible reason is that the model is not yet loaded and Unpack(ed). "
177 "buffer:%2% at %3%") %
178 location.m_Function %
179 bufferIndex %
180 location.FileLine()));
181 }
182 else if (bufferIndex >= model->buffers.size())
183 {
184 throw ParseException(
185 boost::str(
186 boost::format("%1% was called with an invalid buffer index. "
187 "buffer index:%2% at %3%") %
188 location.m_Function %
189 bufferIndex %
190 location.FileLine()));
191 }
192 else if (model->buffers[bufferIndex].get() == nullptr)
193 {
194 throw ParseException(
195 boost::str(
196 boost::format("The buffer #%1% is null. %3%") %
197 bufferIndex %
198 location.AsString()));
199 }
200}
201
202#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
203 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
204
205void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
206 const armnn::TensorInfo & tensorInfo,
207 uint32_t bufferId,
208 const CheckLocation & location)
209{
210 if (bufferPtr == nullptr)
211 {
212 throw ParseException(
213 boost::str(
214 boost::format("BufferPtr is null for buffer:%1%. %2%") %
215 bufferId %
216 location.AsString()));
217 }
218 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
219 tensorInfo.GetNumBytes() > bufferPtr->data.size())
220 {
221 std::stringstream ss;
222 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
223 << "For tensor: " << tensorInfo.GetShape()
224 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
225 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
226 throw ParseException(ss.str());
227 }
228}
229
230#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
231 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
232
233bool IsActivationSupported(tflite::ActivationFunctionType activationType)
234{
235 switch(activationType)
236 {
237 case tflite::ActivationFunctionType_NONE:
238 case tflite::ActivationFunctionType_RELU:
239 case tflite::ActivationFunctionType_RELU6:
240 case tflite::ActivationFunctionType_TANH:
241 {
242 return true;
243 }
244 default:
245 {
246 return false;
247 }
248 }
249}
250
251#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
252 do { \
253 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
254 { \
255 throw ParseException( \
256 boost::str( \
257 boost::format("TfLite parser doesn't suppport fused activation: " \
258 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
259 OPTION->fused_activation_function % \
260 tflite::EnumNameActivationFunctionType(\
261 OPTION->fused_activation_function) % \
262 __func__ % \
263 SUBGRAPH_INDEX % \
264 OPERATOR_INDEX % \
265 CHECK_LOCATION().FileLine())); \
266 } \
267 } while(false)
268
269
270std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
271{
272 std::vector<unsigned int> result;
273 result.reserve(in.size());
274 for (auto & i : in)
275 {
276 result.push_back(CHECKED_NON_NEGATIVE(i));
277 }
278 return result;
279}
280
281void CalcPadding(uint32_t inputSize,
282 uint32_t filterSize,
283 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100284 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100285 uint32_t& paddingFront,
286 uint32_t& paddingBack,
287 tflite::Padding padding)
288{
289 paddingFront = 0;
290 paddingBack = 0;
291 if (padding == tflite::Padding_SAME)
292 {
293 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100294 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
295 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100296 if (temp > inputSize)
297 {
298 paddingFront = (temp - inputSize) / 2;
299 paddingBack = (temp - inputSize) - paddingFront;
300 }
301 }
302}
303
Keith Davis0c2eeac2020-02-11 16:51:50 +0000304armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes,
305 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
telsoa01c577f2c2018-08-31 09:22:23 +0100306{
307 armnn::DataType type;
308 CHECK_TENSOR_PTR(tensorPtr);
309
310 switch (tensorPtr->type)
311 {
312 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000313 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100314 break;
315 case tflite::TensorType_FLOAT32:
316 type = armnn::DataType::Float32;
317 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000318 case tflite::TensorType_INT8:
Keith Davisa9057352020-02-19 10:08:33 +0000319 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000320 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000321 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000322 type = armnn::DataType::QAsymmS8;
323 }
324 else
325 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000326 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000327 type = armnn::DataType::QSymmS8;
328 }
Finn Williamsed66d142019-12-06 09:55:55 +0000329 break;
330 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000331 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000332 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100333 case tflite::TensorType_INT32:
334 type = armnn::DataType::Signed32;
335 break;
336
337 default:
338 {
339 CheckLocation location = CHECK_LOCATION();
340 throw ParseException(
341 boost::str(
342 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
343 tensorPtr->type %
344 tflite::EnumNameTensorType(tensorPtr->type) %
345 tensorPtr->name %
346 location.AsString()));
347 }
348 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100349 std::vector<unsigned int> safeShape = shapes;
350 if (safeShape.size() == 0)
351 {
352 safeShape.push_back(1);
353 }
354
Keith Davisd305e1a2020-01-22 11:57:54 +0000355 float quantizationScale = 0.0f;
356 int32_t quantizationOffset = 0;
357
358 if (tensorPtr->quantization.get())
359 {
360 if (tensorPtr->quantization->scale.size() <= 1)
361 {
362 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
363 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
364
365 if (tensorPtr->quantization->scale.size() == 1)
366 {
367 quantizationScale = tensorPtr->quantization->scale[0];
368 }
369 if (tensorPtr->quantization->zero_point.size() == 1)
370 {
371 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000372 // but this is what we support at the moment in ArmNN
Keith Davisd305e1a2020-01-22 11:57:54 +0000373 quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
374 }
375
376 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
377 safeShape.data(),
378 type,
379 quantizationScale,
380 quantizationOffset);
381
382 return result;
383 }
384 else
385 {
386 std::vector<float> quantizationScales;
387 std::vector<int32_t> quantizationOffsets;
388
389 // Scale
390 std::copy(tensorPtr->quantization->scale.begin(),
391 tensorPtr->quantization->scale.end(),
392 std::back_inserter(quantizationScales));
393
Keith Davis0c2eeac2020-02-11 16:51:50 +0000394 // QSymmS8 Per-axis
Keith Davisd305e1a2020-01-22 11:57:54 +0000395 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
396 safeShape.data(),
397 type,
398 quantizationScales,
Keith Davis0c2eeac2020-02-11 16:51:50 +0000399 dimensionMappings[boost::numeric_cast<unsigned int>(
400 tensorPtr->quantization->quantized_dimension)]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000401 return result;
402 }
403 }
404 else
405 {
406 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
407 safeShape.data(),
408 type,
409 quantizationScale,
410 quantizationOffset);
411 return result;
412 }
telsoa01c577f2c2018-08-31 09:22:23 +0100413}
414
Keith Davis0c2eeac2020-02-11 16:51:50 +0000415armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
416 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000417{
418 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Keith Davis0c2eeac2020-02-11 16:51:50 +0000419 return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000420}
421
telsoa01c577f2c2018-08-31 09:22:23 +0100422template<typename T>
423std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
424CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
425 TfLiteParser::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000426 armnn::TensorInfo& tensorInfo,
427 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100428{
Derek Lambertibaa177f2019-12-10 22:00:43 +0000429 boost::ignore_unused(tensorPtr);
telsoa01c577f2c2018-08-31 09:22:23 +0100430 BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
431 BOOST_ASSERT_MSG(bufferPtr != nullptr,
432 boost::str(
433 boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
434
435 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000436
437 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
438 {
439 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000440 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
441 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000442 }
443 else
444 {
445 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
446 }
447
telsoa01c577f2c2018-08-31 09:22:23 +0100448 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
449}
450
telsoa01c577f2c2018-08-31 09:22:23 +0100451armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
452{
453 // generate the binding id by shifting the tensor id by 8 bit
454 // and add the subgraph id, which allows 256 subgraphs
455 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
456}
457
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000458bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
459{
460 const unsigned int actualSize = actual.GetNumDimensions();
461 if (actualSize != expected.size())
462 {
463 return false;
464 }
465
466 for (unsigned int i = 0u; i < actualSize; i++)
467 {
468 if (expected[i] < 0 ||
469 actual[i] != static_cast<unsigned int>(expected[i]))
470 {
471 return false;
472 }
473 }
474
475 return true;
476}
477
telsoa01c577f2c2018-08-31 09:22:23 +0100478} // <anonymous>
479
/// Constructs the parser and populates the operator dispatch tables.
/// Every builtin opcode slot defaults to ParseUnsupportedOperator, so any
/// opcode not explicitly registered below is reported (or stubbed with a
/// StandInLayer, depending on options) rather than dereferencing a null entry.
TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParser::ParseConv2D;
    // CUSTOM dispatches a second time through m_CustomParserFunctions below.
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParser::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParser::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParser::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParser::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParser::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParser::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParser::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"]      = &TfLiteParser::ParseDetectionPostProcess;
}
525
526void TfLiteParser::ResetParser()
527{
528 m_Network = armnn::INetworkPtr(nullptr, nullptr);
529 m_Model = nullptr;
530 m_SubgraphConnections.clear();
531}
532
/// Inserts a Reshape layer in front of the lower-rank input of a binary
/// elementwise operator so that both inputs end up with the same number of
/// dimensions (the smaller shape is right-aligned and padded with leading 1s),
/// which enables broadcasting.
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    // Initially assume input 0 is the one that needs reshaping...
    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    // ...and swap the roles if input 1 is actually the lower-rank one, so
    // that "reshaped*" always refers to the input with fewer dimensions.
    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    // Right-align the smaller shape inside a vector of 1s of the larger rank,
    // e.g. [3,4] against rank 4 becomes [1,1,3,4].
    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    // The reshape output feeds input slot 0 of the wrapped layer...
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    // ...while the unchanged (higher-rank) input connects directly to slot 1.
    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
589
/// Loads a TfLite flatbuffer model from disk and converts it to an ArmNN
/// network. Any previous parser state is discarded first.
INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}
596
/// Converts an in-memory TfLite flatbuffer (already read into a byte vector)
/// to an ArmNN network. Any previous parser state is discarded first.
INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
603
/// Converts the already-unpacked flatbuffer model (m_Model) into an ArmNN
/// INetwork. Operator parse failures are collected and reported together in
/// one ParseException rather than aborting on the first one. Finally wires
/// all recorded producer output slots to their consumer input slots.
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    // Only single-subgraph models are supported at the moment.
    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubgraphPtr const & subgraph : m_Model->subgraphs)
    {
        // One connection-tracking slot per tensor in this subgraph.
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                                          builtinCode %
                                          tflite::BuiltinOperator_MAX %
                                          subgraphIndex %
                                          operatorIndex %
                                          CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                // Record the failure but keep going, so every broken operator
                // is reported in the single exception thrown below.
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                ARMNN_LOG(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    // NOTE(review): this loop variable shadows the subgraphIndex counter above.
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                    inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                    ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}
699
700void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
701 size_t tensorIndex,
702 armnn::IOutputSlot* slot)
703{
704 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
705 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
706 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
707
708 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
709
710 // assuming there is only one producer for that tensor
711 if (tensorSlots.outputSlot != nullptr)
712 {
713 throw ParseException(boost::str(
714 boost::format("Another layer has already registered itself as the producer of "
715 "subgraph:%1% tensor:%2% %3%") %
716 subgraphIndex %
717 tensorIndex %
718 CHECK_LOCATION().AsString()));
719 }
720
721 tensorSlots.outputSlot = slot;
722}
723
724void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
725 size_t tensorIndex,
726 armnn::IInputSlot* slot)
727{
728 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
729 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
730 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
731
732 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
733 tensorSlots.inputSlots.push_back(slot);
734}
735
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100736void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
737{
738 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
739
740 // NOTE: By default we presume the custom operator is not supported
741 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
742
743 // Identify custom code defined for custom operator
744 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
745 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
746
747 // Find parser function that correspondes to custom code (if any)
748 auto iterator = m_CustomParserFunctions.find(customCode);
749 if (iterator != m_CustomParserFunctions.end())
750 {
751 customParserFunction = iterator->second;
752 }
753
754 // Run parser function
755 (this->*customParserFunction)(subgraphIndex, operatorIndex);
756}
757
telsoa01c577f2c2018-08-31 09:22:23 +0100758void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
759{
760 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100761
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100762 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
763
764 auto opcodeIndex = operatorPtr->opcode_index;
765 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
766
767 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
768 {
769 // Do not add StandInLayer, throw ParseException instead
770 throw ParseException(
771 boost::str(
772 boost::format("Operator not supported. "
773 "subgraph:%1% operator:%2% "
774 "opcode_index:%3% opcode:%4% / %5% %6%") %
775 subgraphIndex %
776 operatorIndex %
777 opcodeIndex %
778 opcode %
779 tflite::EnumNameBuiltinOperator(opcode) %
780 CHECK_LOCATION().AsString()));
781 }
782
783 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
784 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
785
786 const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
787 const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());
788
789 StandInDescriptor descriptor(numInputs, numOutputs);
790 auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
791
792 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
793 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
794 for (unsigned int i = 0u; i < numOutputs; ++i)
795 {
796 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
797 }
798
799 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
800 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
801
802 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
803 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +0100804}
805
telsoa01c577f2c2018-08-31 09:22:23 +0100806void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
807{
808 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
809
810 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
811 const auto * options = operatorPtr->builtin_options.AsConv2DOptions();
812
813 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
814
815 Convolution2dDescriptor desc;
816 desc.m_BiasEnabled = false;
817 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
818 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000819 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100820 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
821 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000822
telsoa01c577f2c2018-08-31 09:22:23 +0100823 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
824 CHECK_VALID_SIZE(inputs.size(), 2, 3);
825
826 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
827 CHECK_VALID_SIZE(outputs.size(), 1);
828
829 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
830 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
831
832 // assuming input is NHWC
833 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
834 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
835
836 // assuming the filter is OHWI : Output, H, W, Input
837 // which is essentially the same as NHWC
838 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
839 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
840
Pablo Tellof0bd6832019-04-26 17:58:13 +0100841 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
842 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
843 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
844 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100845
Matteo Martincigh747ef822018-12-18 09:26:39 +0000846 auto filterTensorAndData = CreateConstTensor(inputs[1],
847 filterTensorInfo,
848 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100849 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100850
851 auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
852
853 if (inputs.size() == 3)
854 {
855 desc.m_BiasEnabled = true;
856 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000857 auto biasTensorAndData = CreateConstTensor(inputs[2],
858 biasTensorInfo,
859 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100860 layer = m_Network->AddConvolution2dLayer(desc,
861 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100862 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100863 layerName.c_str());
864 }
865 else
866 {
867 layer = m_Network->AddConvolution2dLayer(desc,
868 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100869 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100870 layerName.c_str());
871 }
872
873 BOOST_ASSERT(layer != nullptr);
874
telsoa01c577f2c2018-08-31 09:22:23 +0100875 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000876 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100877
878 // register the input connection slots for the layer, connections are made after all layers have been created
879 // only the tensors for the inputs are relevant, exclude the const tensors
880 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000881 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100882
jimfly01c25411c2018-11-14 17:47:22 +0000883 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100884 // register the output connection slots for the layer, connections are made after all layers have been created
885 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
886 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
887}
888
889void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
890{
891 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
892
893 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
894 const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
895
896 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
897
898 DepthwiseConvolution2dDescriptor desc;
899 desc.m_BiasEnabled = false;
900 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
901 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000902 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matthew Jacksond6a9dee2019-07-22 13:53:24 +0100903 CHECKED_NON_NEGATIVE(options->depth_multiplier);
telsoa01c577f2c2018-08-31 09:22:23 +0100904
905 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
906 CHECK_VALID_SIZE(inputs.size(), 2, 3);
907 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
908 CHECK_VALID_SIZE(outputs.size(), 1);
Pablo Tellof0bd6832019-04-26 17:58:13 +0100909 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
910 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000911
Keith Davis0c2eeac2020-02-11 16:51:50 +0000912 // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
913 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
914
telsoa01c577f2c2018-08-31 09:22:23 +0100915 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Keith Davis0c2eeac2020-02-11 16:51:50 +0000916 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1], permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100917
Matteo Martincigh747ef822018-12-18 09:26:39 +0000918 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +0100919 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
920 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000921
922 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +0100923 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
924 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
925
Matteo Martincigh747ef822018-12-18 09:26:39 +0000926 // Reshape weights as [ H, W, I, M ]
927 filterTensorInfo.SetShape({ filterHeight,
928 filterWidth,
929 inputTensorInfo.GetShape()[3],
930 filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });
931
Pablo Tellof0bd6832019-04-26 17:58:13 +0100932 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
933 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
934 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
935 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100936
Matteo Martincigh747ef822018-12-18 09:26:39 +0000937 auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100938 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100939 auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
940
941 if (inputs.size() == 3)
942 {
943 desc.m_BiasEnabled = true;
944 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000945 auto biasTensorAndData = CreateConstTensor(inputs[2],
946 biasTensorInfo,
947 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100948 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
949 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100950 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100951 layerName.c_str());
952 }
953 else
954 {
955 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
956 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100957 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100958 layerName.c_str());
959 }
960 BOOST_ASSERT(layer != nullptr);
961
telsoa01c577f2c2018-08-31 09:22:23 +0100962 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000963 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100964
965 // register the input connection slots for the layer, connections are made after all layers have been created
966 // only the tensors for the inputs are relevant, exclude the const tensors
967 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000968 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100969
jimfly01c25411c2018-11-14 17:47:22 +0000970 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100971 // register the output connection slots for the layer, connections are made after all layers have been created
972 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
973 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
974}
975
Finn Williamsed66d142019-12-06 09:55:55 +0000976void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
977{
978 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
979
980 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
981 CHECK_VALID_SIZE(inputs.size(), 1);
982
983 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
984 CHECK_VALID_SIZE(outputs.size(), 1);
985
986 auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
987
988 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
989 BOOST_ASSERT(layer != nullptr);
990
991 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
992 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
993
994 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
995 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
996
997 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
998 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
999}
1000
Keith Davis4cd29a02019-09-09 14:49:20 +01001001void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
1002{
1003 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1004
1005 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001006 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001007
1008 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1009 CHECK_VALID_SIZE(outputs.size(), 1);
1010
1011 armnn::IConnectableLayer* layer = nullptr;
1012 auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);
1013
1014 PermuteDescriptor desc;
1015
josh minorba424d22019-11-13 10:55:17 -06001016 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001017 {
1018 armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
1019 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001020 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1021 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001022 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
1023
josh minorba424d22019-11-13 10:55:17 -06001024 // permuteShape assumes Tf/Np permute vectors, we must translate to armnn expected form
1025 // to do so we find the perm vector which would invert what a tf perm vector would do (ex 3,0,1,2 -> 1,2,3,0)
1026 std::vector<unsigned int> armnnPermuteShape(numPermVecElements);
1027 std::vector<unsigned int>::iterator it;
1028 for (unsigned int i = 0u; i < numPermVecElements; ++i)
1029 {
1030 it = std::find(permuteShape.begin(), permuteShape.end(), i);
1031 armnnPermuteShape[i] = static_cast<unsigned int>(std::distance(permuteShape.begin(), it));
1032 }
Kevin May85d92602019-09-27 17:21:06 +01001033
josh minorba424d22019-11-13 10:55:17 -06001034 PermutationVector permutationVector(armnnPermuteShape.data(), permuteTensorInfo.GetNumElements());
1035
1036 desc = PermuteDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001037 }
1038
Keith Davis4cd29a02019-09-09 14:49:20 +01001039 layer = m_Network->AddPermuteLayer(desc, layerName.c_str());
1040
1041 BOOST_ASSERT(layer != nullptr);
1042
1043 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1044 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1045
1046 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1047 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1048
1049 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1050 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1051}
1052
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001053void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
1054{
1055 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1056
1057 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1058 const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();
1059
1060 TransposeConvolution2dDescriptor desc;
1061 desc.m_BiasEnabled = false;
1062 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1063 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1064 desc.m_DataLayout = armnn::DataLayout::NHWC;
1065
1066 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001067 CHECK_VALID_SIZE(inputs.size(), 3);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001068
1069 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1070 CHECK_VALID_SIZE(outputs.size(), 1);
1071
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001072 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001073 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1074
1075 // TfLite uses NHWC tensors
1076 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1077 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1078
1079 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1080 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1081
1082 CalcPadding(inputHeight,
1083 filterHeight,
1084 desc.m_StrideY,
1085 1, // DilationY
1086 desc.m_PadTop,
1087 desc.m_PadBottom,
1088 options->padding);
1089
1090 CalcPadding(inputWidth,
1091 filterWidth,
1092 desc.m_StrideX,
1093 1, // DilationX
1094 desc.m_PadLeft,
1095 desc.m_PadRight,
1096 options->padding);
1097
1098 auto filterTensorAndData = CreateConstTensor(inputs[1],
1099 filterTensorInfo,
1100 armnn::Optional<armnn::PermutationVector&>());
1101
1102 armnn::IConnectableLayer* layer = nullptr;
1103 auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);
1104
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001105 layer = m_Network->AddTransposeConvolution2dLayer(desc,
1106 filterTensorAndData.first,
1107 EmptyOptional(),
1108 layerName.c_str());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001109
1110 BOOST_ASSERT(layer != nullptr);
1111
1112 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1113 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1114
1115 // only the tensors for the inputs are relevant, exclude the const (filter) tensor
1116 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001117 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001118
1119 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1120 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1121}
1122
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001123void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
1124{
1125 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
1126}
1127
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001128void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
1129{
1130 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1131
1132 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1133 CHECK_VALID_SIZE(inputs.size(), 3);
1134
1135 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1136 CHECK_VALID_SIZE(outputs.size(), 1);
1137
1138 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1139 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1140
1141 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1142 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1143
1144 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1145 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1146
1147 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1148 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1149
1150 size_t step = 2;
1151 std::vector<std::pair<unsigned int, unsigned int>> crops;
1152 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1153 {
1154 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1155 }
1156
1157 armnn::BatchToSpaceNdDescriptor desc;
1158 desc.m_BlockShape = blockShape;
1159 desc.m_Crops = crops;
1160 desc.m_DataLayout = armnn::DataLayout::NHWC;
1161
1162 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1163
1164 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
1165 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1166
1167 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1168
1169 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1170 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1171
1172 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1173 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1174}
1175
Matthew Jackson28c94572019-07-18 10:47:03 +01001176void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1177{
1178 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1179
1180 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1181 CHECK_VALID_SIZE(inputs.size(), 1);
1182
1183 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1184 CHECK_VALID_SIZE(outputs.size(), 1);
1185
1186 L2NormalizationDescriptor desc;
1187 desc.m_DataLayout = armnn::DataLayout::NHWC;
1188 auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1189 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1190
1191 BOOST_ASSERT(layer != nullptr);
1192
1193 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1194 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1195
1196 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1197 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1198
1199 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1200 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1201}
1202
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001203void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
1204{
1205 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
1206}
1207
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001208void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1209{
1210 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1211
1212 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1213 CHECK_VALID_SIZE(inputs.size(), 2);
1214
1215 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1216 CHECK_VALID_SIZE(outputs.size(), 1);
1217
1218 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1219 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1220
1221 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1222 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1223
1224 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1225 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1226
1227 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1228 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1229 {
1230 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1231 }
1232 else
1233 {
1234 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1235 }
1236
1237 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1238 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1239}
1240
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001241void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1242{
1243 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1244
1245 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1246 CHECK_VALID_SIZE(inputs.size(), 2);
1247
1248 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1249 CHECK_VALID_SIZE(outputs.size(), 1);
1250
1251 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1252 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1253
1254 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1255 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1256
1257 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1258 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1259
1260 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1261 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1262 {
1263 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1264 }
1265 else
1266 {
1267 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1268 }
1269
1270 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1271 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1272}
1273
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001274void TfLiteParser::ParsePool(size_t subgraphIndex,
1275 size_t operatorIndex,
1276 PoolingAlgorithm algorithm)
1277{
1278 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1279
1280 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1281 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1282
1283 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1284
1285 std::string layerName;
1286
1287 switch (algorithm)
1288 {
1289 case PoolingAlgorithm::Average:
1290 layerName =
1291 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1292 break;
1293 case PoolingAlgorithm::Max:
1294 layerName =
1295 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1296 break;
1297 default:
1298 BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
1299 }
1300
1301 Pooling2dDescriptor desc;
1302
1303 desc.m_PoolType = algorithm;
1304 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1305 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1306 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1307 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1308 desc.m_PaddingMethod = PaddingMethod::Exclude;
1309 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001310 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001311
1312 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1313 CHECK_VALID_SIZE(inputs.size(), 1);
1314 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1315
1316 // assuming input is NHWC
1317 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1318 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1319
Pablo Tellof0bd6832019-04-26 17:58:13 +01001320 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1321 desc.m_PadTop, desc.m_PadBottom, options->padding);
1322 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1323 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001324
1325 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1326 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001327
1328 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1329
1330 BOOST_ASSERT(layer != nullptr);
1331
jimfly01c25411c2018-11-14 17:47:22 +00001332 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1333 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001334
1335 // register the input connection slots for the layer, connections are made after all layers have been created
1336 // only the tensors for the inputs are relevant, exclude the const tensors
1337 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001338 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001339
jimfly01c25411c2018-11-14 17:47:22 +00001340 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001341 // register the output connection slots for the layer, connections are made after all layers have been created
1342 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1343 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1344}
1345
josh minorba424d22019-11-13 10:55:17 -06001346void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
1347{
1348 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1349
1350 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1351 CHECK_VALID_SIZE(inputs.size(), 3);
1352 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1353 CHECK_VALID_SIZE(outputs.size(), 1);
1354
1355 SliceDescriptor desc;
1356
1357 // set begin tensor info for slice descriptor
1358 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1359 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1360
1361 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1362 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1363
1364 // set size tensor info for slice descriptor
1365 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1366 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1367
1368 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1369 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1370 desc = SliceDescriptor(begin, size);
1371
1372 auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
1373 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
1374
1375 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1376 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1377
1378 // register the input connection slots for the layer, connections are made after all layers have been created
1379 // only the tensors for the inputs are relevant, exclude the const tensors
1380 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1381 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1382
1383 // register the output connection slots for the layer, connections are made after all layers have been created
1384 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1385 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1386}
1387
telsoa01c577f2c2018-08-31 09:22:23 +01001388void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1389{
1390 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1391 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1392 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1393
1394 SoftmaxDescriptor desc;
1395 desc.m_Beta = options->beta;
1396
1397 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1398 CHECK_VALID_SIZE(inputs.size(), 1);
1399 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1400 CHECK_VALID_SIZE(outputs.size(), 1);
1401
1402 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1403 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1404
1405 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1406 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1407
1408 // register the input connection slots for the layer, connections are made after all layers have been created
1409 // only the tensors for the inputs are relevant, exclude the const tensors
1410 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1411 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1412
1413 // register the output connection slots for the layer, connections are made after all layers have been created
1414 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1415 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1416}
1417
/// Parses a TfLite SPACE_TO_BATCH_ND operator.
/// Inputs: [0] data tensor, [1] constant block-shape tensor, [2] constant
/// pad-list tensor of (before, after) pairs. Adds a SpaceToBatchNdLayer
/// (NHWC layout) and registers the data input and the single output.
void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    // Raw byte copy from the flatbuffer constant data; assumes the stored
    // element type is 32-bit and matches sizeof(unsigned int) — TODO confirm
    // against the schema-declared tensor type.
    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    // The flat pad list is consumed two-at-a-time as (before, after) pairs.
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor (inputs[0]) is a real connection; the block-shape
    // and pad-list tensors are constants consumed above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1465
telsoa01c577f2c2018-08-31 09:22:23 +01001466armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1467 const armnn::TensorInfo & inputTensorInfo)
1468{
1469 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1470 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1471 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1472
1473 if (inputTensorInfo.GetNumDimensions() > 4)
1474 {
1475 std::stringstream ss;
1476 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1477 << " shape:" << inputTensorInfo.GetShape() << " "
1478 << CHECK_LOCATION().AsString();
1479 throw ParseException(ss.str());
1480 }
1481
1482 if (squeezeDims.empty())
1483 {
1484 squeezeDims.assign(dimensionSequence,
1485 dimensionSequence+inputTensorInfo.GetNumDimensions());
1486 }
1487
1488 std::vector<uint32_t> outputDims;
1489 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1490 {
1491 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1492 auto currentDimension = inputTensorInfo.GetShape()[i];
1493 if (skipSqueeze || currentDimension != 1)
1494 {
1495 outputDims.push_back(currentDimension);
1496 }
1497 }
1498
1499 if (outputDims.size() > 4)
1500 {
1501 std::stringstream ss;
1502 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1503 << " shape:" << inputTensorInfo.GetShape() << " "
1504 << CHECK_LOCATION().AsString();
1505 throw ParseException(ss.str());
1506 }
1507
1508 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1509 outputDims.data());
1510
1511 // we need to preserve the tensor type and the quantization data as well
1512 TensorInfo outTensorInfo = inputTensorInfo;
1513 outTensorInfo.SetShape(outShape);
1514
1515 return outTensorInfo;
1516}
1517
1518void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1519{
1520 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1521
1522 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1523 CHECK_VALID_SIZE(inputs.size(), 1);
1524
1525 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1526 CHECK_VALID_SIZE(outputs.size(), 1);
1527
1528 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1529 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1530
1531 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1532 armnn::TensorInfo outputTensorInfo =
1533 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1534 inputTensorInfo);
1535
1536 ReshapeDescriptor reshapeDesc;
1537 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1538
1539 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1540 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1541 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1542
1543 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1544 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1545
1546 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1547 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1548}
1549
/// Parses a TfLite STRIDED_SLICE operator.
/// Inputs: [0] data tensor, [1] begin, [2] end, [3] strides — the last three
/// are constant tensors read directly from the flatbuffer. The mask fields
/// are copied verbatim from the builtin options.
void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();

    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Raw byte copies of the three constant index tensors; assumes the
    // flatbuffer stores them as 32-bit values matching sizeof(int) —
    // TODO confirm against the declared tensor types.
    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor is a graph connection; begin/end/stride are
    // constants consumed above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1605
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001606void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1607{
1608 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1609
1610 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1611 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1612
1613 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1614 CHECK_VALID_SIZE(inputs.size(), 2);
1615
1616 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1617 CHECK_VALID_SIZE(outputs.size(), 1);
1618
1619 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1620 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1621
1622 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1623 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1624
1625 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1626 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1627
1628 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1629 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1630 {
1631 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1632 }
1633 else
1634 {
1635 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1636 }
1637
1638 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1639
1640 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1641 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1642}
1643
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001644void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1645{
1646 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1647
1648 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1649 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1650
1651 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1652 CHECK_VALID_SIZE(inputs.size(), 2);
1653
1654 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1655 CHECK_VALID_SIZE(outputs.size(), 1);
1656
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001657 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1658 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1659
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001660 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1661 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1662
1663 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1664 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1665
1666 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001667 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1668 {
1669 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1670 }
1671 else
1672 {
1673 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1674 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001675
1676 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1677
1678 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1679 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1680}
1681
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001682void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1683{
1684 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1685
1686 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1687 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1688
1689 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1690 CHECK_VALID_SIZE(inputs.size(), 2);
1691
1692 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1693 CHECK_VALID_SIZE(outputs.size(), 1);
1694
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001695 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1696 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1697
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001698 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1699 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1700
1701 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1702 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1703
1704 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001705 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1706 {
1707 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1708 }
1709 else
1710 {
1711 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1712 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001713
1714 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1715
1716 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1717 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1718}
1719
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001720void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1721{
1722 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1723
1724 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1725
1726 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1727 CHECK_VALID_SIZE(outputs.size(), 1);
1728
1729 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1730 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1731
1732 armnn::MeanDescriptor desc;
1733 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1734 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1735 desc.m_Axis = axis;
1736
1737 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1738 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1739
1740 desc.m_KeepDims =
1741 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1742 true : false;
1743
1744 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1745 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1746
1747 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1748
1749 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1750 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1751
1752 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1753 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1754}
1755
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001756void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1757{
1758 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1759
1760 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1761
1762 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1763 CHECK_VALID_SIZE(outputs.size(), 1);
1764
1765 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1766 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1767
1768 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1769 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1770
1771 size_t step = 2;
1772 armnn::PadDescriptor desc;
1773 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1774 {
1775 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1776 }
1777
1778 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1779 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1780
1781 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1782 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1783
1784 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1785 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1786
1787 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1788 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1789}
1790
/// Parses a TfLite QUANTIZE operator: adds a QuantizeLayer whose output type
/// and quantization parameters come from the model's declared output tensor.
void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);

    IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
    BOOST_ASSERT(layer != nullptr);

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // NOTE(review): unlike most parsers here, this registers the full output
    // index list rather than {outputTensorIndexes[0]} — equivalent for the
    // single-output case asserted above.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
Finn Williamsc42c3842019-01-22 14:18:11 +00001815
/// Parses a TfLite RELU operator by delegating to the shared activation path.
void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
}
1820
/// Parses a TfLite RELU6 operator; lowered to BoundedReLu (upper bound set
/// to 6 inside ParseActivation).
void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
}
Sadik Armagan58f39192018-09-17 14:14:39 +01001825
/// Parses a TfLite LOGISTIC operator; lowered to a Sigmoid activation.
void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
}
1830
/// Parses a TfLite TANH operator by delegating to the shared activation path.
void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
}
1835
Finn Williamsc42c3842019-01-22 14:18:11 +00001836
/// Shared implementation for the single-input activation operators
/// (RELU, RELU6, LOGISTIC, TANH). Builds an ActivationDescriptor for the
/// requested function, names the layer "Activation:<FN>:<subgraph>:<op>",
/// and wires the single input/output.
/// @throws ParseException for an activation type without a case below.
void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    boost::ignore_unused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = str(boost::format("Activation:"));
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    // Each case appends the operator-specific suffix to the layer name and
    // fills in the descriptor parameters the function needs.
    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
            // BoundedReLu clamps to [m_B, m_A]; RELU6 is [0, 6].
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        case ActivationFunction::TanH:
        {
            layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
            // TanH scaling parameters: a * tanh(b * x) with a = b = 1.
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
        {
            throw ParseException(
                boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
                                         " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
/// Computes the output TensorInfo of a RESHAPE.
/// @param inputTensorInfo info of the tensor being reshaped.
/// @param targetDimsIn requested shape; at most one entry may be -1,
///        meaning "infer this dimension from the remaining elements".
/// @return a TensorInfo with the resolved shape, preserving type and
///         quantization of the input.
/// @throws ParseException if more than one -1 appears in targetDimsIn.
armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
                                                     const std::vector<int32_t> & targetDimsIn)
{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                boost::str(
                    boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
        }

        // Trick: starting the product at -1 cancels the single -1 in the
        // list, so the result is the (positive) product of all the
        // explicitly-specified dimensions.
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        // The stretch dimension absorbs whatever element count remains.
        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    // Preserve data type and quantization parameters; only the shape changes.
    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
1931
/// Parses a TfLite RESHAPE operator. The target shape is taken from the
/// builtin options (new_shape) and resolved by OutputShapeOfReshape; when a
/// shape tensor is also present (inputs.size() > 1) the resolved shape is
/// cross-checked against the model's declared output shape.
void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);

    // Check for valid input size and that reshape parameters equal output shape
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    // The slot carries the *computed* shape, not the model-declared one.
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1976
/// Parses a TfLite RESIZE_BILINEAR operator via the shared resize path.
void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
}
1981
/// Parses a TfLite RESIZE_NEAREST_NEIGHBOR operator via the shared resize path.
void TfLiteParser::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
}
1986
/// Shared implementation for the resize operators.
/// Inputs: [0] data tensor, [1] constant size tensor holding the target
/// [height, width]. Builds a ResizeDescriptor (NHWC) and names the layer
/// "Resize:<METHOD>:<subgraph>:<op>".
/// @throws ParseException for a ResizeMethod without a case below.
void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);

    // Data for the parsed tensor args (size) must be stored locally.
    std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());

    BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());

    ResizeDescriptor desc;
    desc.m_Method = resizeMethod;
    // Size tensor layout is [height, width].
    desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
    desc.m_TargetWidth  = static_cast<uint32_t> (sizeTensorData[1]);
    desc.m_DataLayout   = armnn::DataLayout::NHWC;

    auto layerName = str(boost::format("Resize:"));

    switch (resizeMethod)
    {
        case ResizeMethod::Bilinear:
        {
            layerName += str(boost::format("BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);

            // Only the bilinear options carry align_corners; the nearest-
            // neighbor path leaves the descriptor default.
            const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
            const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();

            desc.m_BilinearAlignCorners = options->align_corners;
            break;
        }
        case ResizeMethod::NearestNeighbor:
        {
            layerName += str(boost::format("NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                boost::str(boost::format("Unexpected ResizeMethod[%1%] when creating layerName "
                                         " %2% ") %static_cast<int>(resizeMethod)% CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor is a graph connection; the size tensor is a
    // constant consumed above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2049
/// Parses a TfLite CONCATENATION operator with any number of inputs.
/// Builds an OriginsDescriptor whose per-view origins are accumulated by
/// armnnUtils::ProcessConcatInputTensorInfo, then appends any fused
/// activation from the operator options.
void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
    uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();

    // Normalize a possibly-negative axis option into [0, inputRank).
    const unsigned int concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

    // Running offset along the concat axis, advanced per input view.
    unsigned int mergeDimOrigin = 0;

    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);

        // This set up concatDescriptor view origin
        armnnUtils::ProcessConcatInputTensorInfo(
            inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // All input tensors are real connections for concat.
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});

    // add fused activation layer
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2101
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002102void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
2103{
2104 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2105
2106 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2107 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2108
2109 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2110
2111 FullyConnectedDescriptor desc;
2112 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002113 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002114
2115 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2116 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2117 CHECK_VALID_SIZE(outputs.size(), 1);
2118
2119 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2120
2121 // Fully Connected Layer accepts two dimensional weights input
2122 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2123 if (weightsDimension != 2)
2124 {
2125 throw ParseException(
2126 boost::str(
2127 boost::format(
2128 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
2129 "Node %2%")
2130 % weightsDimension
2131 % CHECK_LOCATION().AsString()));
2132 }
2133
Matteo Martincigh747ef822018-12-18 09:26:39 +00002134 auto filterTensorAndData = CreateConstTensor(inputs[1],
2135 filterTensorInfo,
2136 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002137 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002138 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
2139
2140 if (inputs.size() == 3)
2141 {
2142 desc.m_BiasEnabled = true;
2143 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002144 auto biasTensorAndData = CreateConstTensor(inputs[2],
2145 biasTensorInfo,
2146 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002147 layer = m_Network->AddFullyConnectedLayer(desc,
2148 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002149 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002150 layerName.c_str());
2151 }
2152 else
2153 {
2154 layer = m_Network->AddFullyConnectedLayer(desc,
2155 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002156 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002157 layerName.c_str());
2158 }
2159 BOOST_ASSERT(layer != nullptr);
2160
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002161 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2162
2163 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2164
2165 if (inputTensorInfo.GetNumDimensions() > 2)
2166 {
2167 // Add reshape to flatten to 2D [batch_size, input_size],
2168 // where "input_size" corresponds to the number of inputs to the layer,
2169 // matching the second dimension of weights,
2170 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2171 std::vector<unsigned int> reshapedDimensions(2);
2172 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2173 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2174
2175 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2176 {
2177 throw ParseException(
2178 boost::str(
2179 boost::format(
2180 "Failed to deduce input tensor shape from filter size %1%")
2181 % reshapedDimensions[1]
2182 % CHECK_LOCATION().AsString()));
2183 }
2184
2185 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2186 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2187
2188 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2189 armnn::ReshapeDescriptor desc;
2190 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2191 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2192
2193 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2194 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2195
2196 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2197 }
2198 else
2199 {
2200 // register the input connection slot for the layer
2201 // only the tensors for the inputs are relevant, exclude the const tensors
2202 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2203 }
2204
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002205 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2206 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2207
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002208 // we need to add the activation layer and fortunately we don't need to care about the data layout
2209 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2210 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002211
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002212 // register the output connection slots for the layer, connections are made after all layers have been created
2213 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2214 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2215}
2216
/// Converts the TfLite custom operator TFLite_Detection_PostProcess into an
/// ArmNN DetectionPostProcess layer. The operator's parameters arrive as a
/// flexbuffers-encoded map in custom_options rather than as builtin options.
/// Expects exactly 4 outputs; inputs[2] is the constant anchors tensor.
void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two keys are optional in the flexbuffer map; the descriptor's
    // defaults are kept when they are absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // The anchors are a constant tensor baked into the layer rather than a
    // runtime input.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
                                                                       layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1 });

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
2294
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002295/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2296void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2297{
2298 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2299
2300 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2301 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2302 CHECK_VALID_SIZE(outputs.size(), 1);
2303
2304 if (inputs.size() < 1)
2305 {
2306 throw ParseException("Pack must have at least one input.");
2307 }
2308
2309 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2310 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2311
2312 StackDescriptor desc;
2313 desc.m_Axis = static_cast<uint32_t>(options->axis);
2314 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2315
2316 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2317 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2318 desc.m_InputShape = inputTensorInfo.GetShape();
2319
2320 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2321 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2322
2323 BOOST_ASSERT(layer != nullptr);
2324
2325 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2326 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2327
2328 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2329 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2330
2331 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2332 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2333}
2334
Nina Drozd200e3802019-04-15 09:47:39 +01002335void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2336{
2337 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2338
2339 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2340 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2341
2342 // This unpackAxis indicates the axis to unpack
2343 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2344
2345 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2346 CHECK_VALID_SIZE(inputs.size(), 1);
2347
2348 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002349
2350 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2351 {
2352 throw ParseException(
2353 boost::str(
2354 boost::format(
2355 "The unpack axis: %1% cannot be greater than or equal to "
2356 "the number of input dimension %2% %3%")
2357 % unpackAxis
2358 % inputTensorInfo.GetNumDimensions()
2359 % CHECK_LOCATION().AsString()));
2360 }
2361
Nina Drozd200e3802019-04-15 09:47:39 +01002362 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2363 // If num is not defined, automatically infer from the length of the dimension axis.
2364 if(unpackNum == 0)
2365 {
2366 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2367 }
2368
2369 // If unpack number cannot be inferred and is still zero, throw ParseException.
2370 if(unpackNum == 0)
2371 {
2372 throw ParseException("Number to unpack must greater than zero.");
2373 }
2374
2375 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2376 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2377
2378 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2379 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2380
2381 // Add current input shape to unpackDimSizes
2382 for (unsigned int i = 0; i < inputDimSize; ++i)
2383 {
2384 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2385 }
2386
2387 if (unpackDimSizes[unpackAxis] != unpackNum)
2388 {
2389 throw ParseException("Number to unpack must be the same as length of the dimension to "
2390 "unpack along.");
2391 }
2392
2393 unpackDimSizes[unpackAxis] /= unpackNum;
2394
2395 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2396 for (unsigned int j = 0; j < unpackNum; ++j)
2397 {
2398 // Set the size of the views.
2399 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2400 {
2401 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2402 }
2403 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2404 }
2405
2406 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2407 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2408
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002409 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2410 unpackDimSizes.data());
2411
Nina Drozd200e3802019-04-15 09:47:39 +01002412 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2413 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2414
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002415 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2416 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2417 {
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002418 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002419 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2420 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002421 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002422 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2423
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002424 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2425 outputTensorInfo.GetDataType(),
2426 outputTensorInfo.GetQuantizationScale(),
2427 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002428 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2429
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002430 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002431
2432 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2433 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2434 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2435 }
Nina Drozd200e3802019-04-15 09:47:39 +01002436}
2437
Nina Drozd0324f482019-04-08 10:52:10 +01002438void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2439{
2440 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2441
2442 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2443 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2444
2445 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2446
Nina Drozd200e3802019-04-15 09:47:39 +01002447 // If number of splits cannot be inferred and is zero, throw ParseException.
2448 if(numSplits == 0)
2449 {
2450 throw ParseException("Number to splits must greater than zero.");
2451 }
2452
Nina Drozd0324f482019-04-08 10:52:10 +01002453 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2454 CHECK_VALID_SIZE(inputs.size(), 2);
2455 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2456 CHECK_VALID_SIZE(outputs.size(), numSplits);
2457
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002458 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2459 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002460
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002461 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2462 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2463 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2464
2465 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2466 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002467
2468 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2469 if (splitDim == 0 || splitDim == 2)
2470 {
2471 throw ParseException(
2472 boost::str(
2473 boost::format(
2474 "Dimension %1% for split is not supported by Armnn. %2%")
2475 % splitDim
2476 % CHECK_LOCATION().AsString()));
2477 }
2478
2479 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002480 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002481 {
2482 throw ParseException(
2483 boost::str(
2484 boost::format(
2485 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002486 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002487 % inputTensorInfo.GetNumDimensions()
2488 % MaxNumOfTensorDimensions
2489 % CHECK_LOCATION().AsString()));
2490 }
2491
2492 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2493
2494 // Add current input shape to splitterDimSizes
2495 for (unsigned int i = 0; i < inputDimSize; ++i)
2496 {
2497 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2498 }
2499
2500 if (splitterDimSizes[splitDim] % numSplits != 0)
2501 {
2502 throw ParseException("Number of splits must evenly divide the dimension");
2503 }
2504 splitterDimSizes[splitDim] /= numSplits;
2505
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002506 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002507 for (unsigned int j = 0; j < numSplits; ++j)
2508 {
2509 // Set the size of the views.
2510 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2511 {
2512 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2513 }
2514 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2515 }
2516
2517 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2518 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2519
2520 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002521 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002522
Nina Drozd0324f482019-04-08 10:52:10 +01002523 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2524 {
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01002525 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
2526 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01002527 }
2528
2529 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2530 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2531}
2532
Sadik Armagan58f39192018-09-17 14:14:39 +01002533armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2534 unsigned int outputSlot,
2535 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002536{
2537 ActivationDescriptor activationDesc;
2538 std::string layerName = prevLayer->GetName();
2539
2540 switch(activationType)
2541 {
2542 case tflite::ActivationFunctionType_NONE:
2543 {
2544 // this is a no-op: return previous layer
2545 return prevLayer;
2546 }
2547 case tflite::ActivationFunctionType_RELU:
2548 {
2549 activationDesc.m_Function = ActivationFunction::ReLu;
2550 layerName += ":RELU";
2551 break;
2552 }
2553 case tflite::ActivationFunctionType_RELU6:
2554 {
2555 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2556 activationDesc.m_A = 6.0f;
2557 activationDesc.m_B = 0.0f;
2558 layerName += ":RELU6";
2559 break;
2560 }
2561 case tflite::ActivationFunctionType_TANH:
2562 {
2563 activationDesc.m_Function = ActivationFunction::TanH;
2564 activationDesc.m_A = 1.0f;
2565 activationDesc.m_B = 1.0f;
2566 layerName += ":TANH";
2567 break;
2568 }
2569
2570 // I only put these here as a reminder what others we could support
2571 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2572 case tflite::ActivationFunctionType_SIGN_BIT:
2573 default:
2574 {
2575 throw ParseException(
2576 boost::str(
2577 boost::format("TfLite parser doesn't suppport fused activation: "
2578 "%1%/%2% %3% ") %
2579 activationType %
2580 tflite::EnumNameActivationFunctionType(activationType) %
2581 CHECK_LOCATION().AsString()));
2582
2583 }
2584 }
2585
2586 IConnectableLayer* activationLayer =
2587 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2588
2589 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2590 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2591 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2592 return activationLayer;
2593}
2594
2595TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2596{
2597 if (fileName == nullptr)
2598 {
2599 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2600 CHECK_LOCATION().AsString()));
2601 }
2602 boost::system::error_code errorCode;
2603 boost::filesystem::path pathToFile(fileName);
2604 if (!boost::filesystem::exists(pathToFile, errorCode))
2605 {
2606 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2607 fileName %
2608 errorCode %
2609 CHECK_LOCATION().AsString()));
2610 }
2611 std::ifstream file(fileName, std::ios::binary);
2612 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2613 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2614 fileContent.size());
2615}
2616
2617TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2618{
2619 if (binaryContent == nullptr)
2620 {
2621 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2622 CHECK_LOCATION().AsString()));
2623 }
2624 flatbuffers::Verifier verifier(binaryContent, len);
2625 if (verifier.VerifyBuffer<tflite::Model>() == false)
2626 {
2627 throw ParseException(
2628 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2629 "flatbuffers format. size:%1% %2%") %
2630 len %
2631 CHECK_LOCATION().AsString()));
2632 }
2633 return tflite::UnPackModel(binaryContent);
2634}
2635
2636TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2637 size_t subgraphIndex,
2638 size_t operatorIndex)
2639{
2640 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2641
Derek Lambertiff05cc52019-04-26 13:05:17 +01002642 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2643 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002644
2645 size_t inputCount = operatorPtr->inputs.size();
2646 TensorRawPtrVector result(inputCount);
2647 for (size_t i=0; i<inputCount; ++i)
2648 {
2649 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002650 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002651 }
2652 return result;
2653}
2654
2655TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2656 size_t subgraphIndex,
2657 size_t operatorIndex)
2658{
2659 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2660
Derek Lambertiff05cc52019-04-26 13:05:17 +01002661 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2662 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002663
2664 size_t outputCount = operatorPtr->outputs.size();
2665 TensorRawPtrVector result(outputCount);
2666 for (size_t i=0; i<outputCount; ++i)
2667 {
2668 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2669 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002670 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002671 }
2672 return result;
2673}
2674
2675TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2676 size_t subgraphIndex)
2677{
2678 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002679 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002680
Derek Lambertiff05cc52019-04-26 13:05:17 +01002681 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002682 TensorIdRawPtrVector result(inputCount);
2683 for (size_t i=0; i<inputCount; ++i)
2684 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002685 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002686 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002687 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002688 }
2689 return result;
2690}
2691
2692TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2693 size_t subgraphIndex)
2694{
2695 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002696 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002697
Derek Lambertiff05cc52019-04-26 13:05:17 +01002698 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002699 TensorIdRawPtrVector result(outputCount);
2700 for (size_t i=0; i<outputCount; ++i)
2701 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002702 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2703 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002704 }
2705 return result;
2706}
2707
2708std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2709 size_t subgraphIndex,
2710 size_t operatorIndex)
2711{
2712 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002713 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2714 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002715 return operatorPtr->inputs;
2716}
2717
2718std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2719 size_t subgraphIndex,
2720 size_t operatorIndex)
2721{
2722 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002723 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2724 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002725 return operatorPtr->outputs;
2726}
2727
2728void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2729 size_t operatorIndex,
2730 IConnectableLayer* layer,
2731 const std::vector<unsigned int>& tensorIndexes)
2732{
2733 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2734 BOOST_ASSERT(layer != nullptr);
2735 if (tensorIndexes.size() != layer->GetNumInputSlots())
2736 {
2737 throw ParseException(
2738 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2739 " for subgraph:%3% operator index:%4% %5%") %
2740 tensorIndexes.size() %
2741 layer->GetNumInputSlots() %
2742 subgraphIndex %
2743 operatorIndex %
2744 CHECK_LOCATION().AsString()));
2745 }
2746
2747 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2748 {
2749 unsigned int tensorIndex = tensorIndexes[slotIndex];
2750 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2751 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2752 }
2753}
2754
2755void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2756 size_t operatorIndex,
2757 IConnectableLayer* layer,
2758 const std::vector<unsigned int>& tensorIndexes)
2759{
2760 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2761 BOOST_ASSERT(layer != nullptr);
2762 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2763 {
2764 throw ParseException(
2765 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2766 " for subgraph:%3% operator index:%4% %5%") %
2767 tensorIndexes.size() %
2768 layer->GetNumOutputSlots() %
2769 subgraphIndex %
2770 operatorIndex %
2771 CHECK_LOCATION().AsString()));
2772 }
2773
2774 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2775 {
2776 unsigned int tensorIndex = tensorIndexes[slotIndex];
2777 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2778 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2779 }
2780}
2781
2782void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2783{
2784 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2785
2786 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2787 for (auto const & tensorIdAndPtr : inputs)
2788 {
2789 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2790 IConnectableLayer* layer =
2791 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2792
2793 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2794 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2795
2796 RegisterOutputSlots(subgraphIndex,
2797 VIRTUAL_OPERATOR_ID,
2798 layer,
2799 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2800 }
2801}
2802
2803void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2804{
2805 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2806
2807 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2808 for (auto const & tensorIdAndPtr : outputs)
2809 {
2810 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2811 IConnectableLayer* layer =
2812 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2813
2814 RegisterInputSlots(subgraphIndex,
2815 VIRTUAL_OPERATOR_ID,
2816 layer,
2817 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2818 }
2819}
2820
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002821void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2822{
2823 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2824
Derek Lambertiff05cc52019-04-26 13:05:17 +01002825 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002826 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2827 {
2828 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2829 {
2830 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2831 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2832 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002833 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002834 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2835 auto tensorAndData = CreateConstTensor(tensorPtr,
2836 tensorInfo,
2837 armnn::Optional<armnn::PermutationVector&>());
2838
2839 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2840 IConnectableLayer *layer =
2841 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2842
2843 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2844 RegisterOutputSlots(subgraphIndex,
2845 VIRTUAL_OPERATOR_ID,
2846 layer,
2847 { tensorIndex });
2848
2849 }
2850 }
2851 }
2852}
2853
telsoa01c577f2c2018-08-31 09:22:23 +01002854// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2855TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2856{
2857 CHECK_BUFFER(model, bufferIndex);
2858 return model->buffers[bufferIndex].get();
2859}
2860
Matteo Martincigh747ef822018-12-18 09:26:39 +00002861template<typename T>
2862std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2863TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2864 TfLiteParser::TensorRawPtr tensorPtr,
2865 armnn::TensorInfo& tensorInfo,
2866 armnn::Optional<armnn::PermutationVector&> permutationVector)
2867{
2868 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2869 tensorPtr,
2870 tensorInfo,
2871 permutationVector);
2872 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2873 return std::make_pair(constData.first, std::move(storage));
2874}
2875
telsoa01c577f2c2018-08-31 09:22:23 +01002876std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2877TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002878 armnn::TensorInfo& tensorInfo,
2879 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002880{
2881 CHECK_TENSOR_PTR(tensorPtr);
2882 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2883 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2884
2885 switch (tensorInfo.GetDataType())
2886 {
2887 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002888 return CreateConstTensorAndStoreData<float>(bufferPtr,
2889 tensorPtr,
2890 tensorInfo,
2891 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00002892 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002893 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2894 tensorPtr,
2895 tensorInfo,
2896 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00002897 case armnn::DataType::QSymmS8:
2898 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2899 tensorPtr,
2900 tensorInfo,
2901 permutationVector);
Keith Davisa9057352020-02-19 10:08:33 +00002902 case armnn::DataType::QAsymmS8:
2903 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2904 tensorPtr,
2905 tensorInfo,
2906 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002907 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002908 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2909 tensorPtr,
2910 tensorInfo,
2911 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002912 default:
2913 {
2914 std::stringstream errString;
2915 errString << "Unexpected datatype when creating const tensor: "
2916 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2917 << " shape:" << tensorInfo.GetShape()
2918 << CHECK_LOCATION().AsString();
2919 throw ParseException(errString.str());
2920 }
2921 }
2922}
2923
2924BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2925 const std::string& name) const
2926{
2927 CHECK_SUBGRAPH(m_Model, subgraphId);
2928 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2929 for (auto const & input : inputs)
2930 {
2931 if (input.second->name == name)
2932 {
2933 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2934 return std::make_pair(bindingId, ToTensorInfo(input.second));
2935 }
2936 }
2937
2938 std::stringstream bindings;
2939 for (auto const & input : inputs)
2940 {
2941 bindings << "'" << input.second->name << "' ";
2942 }
2943
2944 throw ParseException(
2945 boost::str(
2946 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2947 "Possible inputs are: [%3%] %4%") %
2948 subgraphId %
2949 name %
2950 bindings.str() %
2951 CHECK_LOCATION().AsString()));
2952}
2953
2954BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2955 const std::string& name) const
2956{
2957 CHECK_SUBGRAPH(m_Model, subgraphId);
2958 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002959 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002960 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002961 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002962 if (output.second->name == name)
2963 {
2964 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002965 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2966 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2967 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002968 }
2969 }
2970
2971 std::stringstream bindings;
2972 for (auto const & output : outputs)
2973 {
2974 bindings << "'" << output.second->name << "' ";
2975 }
2976
2977 throw ParseException(
2978 boost::str(
2979 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2980 "Possible outputs are: [%3%] %4%") %
2981 subgraphId %
2982 name %
2983 bindings.str() %
2984 CHECK_LOCATION().AsString()));
2985}
2986
// Returns the number of subgraphs in the loaded TfLite model.
size_t TfLiteParser::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}
2991
2992std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2993{
2994 CHECK_SUBGRAPH(m_Model, subgraphId);
2995 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2996 std::vector<std::string> result;
2997 result.reserve(inputs.size());
2998 for (auto const & input : inputs)
2999 {
3000 result.push_back(input.second->name);
3001 }
3002 return result;
3003}
3004
3005std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
3006{
3007 CHECK_SUBGRAPH(m_Model, subgraphId);
3008 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3009 std::vector<std::string> result;
3010 result.reserve(outputs.size());
3011 for (auto const & output : outputs)
3012 {
3013 result.push_back(output.second->name);
3014 }
3015 return result;
3016}
3017
// Factory returning a raw, heap-allocated parser; the caller owns it and must
// release it with ITfLiteParser::Destroy (prefer Create, which does this).
ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
{
    return new TfLiteParser(options);
}
3022
// Factory returning a smart pointer that releases the parser through
// ITfLiteParser::Destroy, so destruction happens in this library's heap.
ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
{
    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
}
3027
// Deletes a parser obtained from CreateRaw; used as the deleter of ITfLiteParserPtr.
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
3032
// Takes ownership of a float buffer; the other typed pointers stay null so the
// storage object releases exactly one allocation.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
3040
// Takes ownership of a uint8_t buffer; the other typed pointers stay null.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
3048
// Takes ownership of an int8_t buffer; the other typed pointers stay null.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}
3056
// Takes ownership of an int32_t buffer; the other typed pointers stay null.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
3064
3065} // armnnTfLiteParser