blob: 593f3eb02d569485866e71fc9e7d0b7b73a1635e [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Bentham39ef3e52020-01-20 10:09:09 +00008#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +01009#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000010#include <armnn/Logging.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010011#include <armnn/TypesUtils.hpp>
12#include <boost/filesystem.hpp>
13
14// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000015#include <armnnUtils/Permute.hpp>
16
Sadik Armagan479045b2018-10-01 11:51:37 +010017#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010018#include <VerificationHelpers.hpp>
19
20// The generated code based on the Tf Lite schema:
21#include <schema_generated.h>
22
Matteo Martincighe011d202019-11-28 11:35:47 +000023#include <flatbuffers/flexbuffers.h>
24
telsoa01c577f2c2018-08-31 09:22:23 +010025#include <boost/core/ignore_unused.hpp>
26#include <boost/assert.hpp>
27#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010028#include <boost/numeric/conversion/cast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010029
30#include <fstream>
31#include <algorithm>
32#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010033#include <numeric>
telsoa01c577f2c2018-08-31 09:22:23 +010034
35using namespace armnn;
36using armnn::CheckLocation;
37namespace armnnTfLiteParser
38{
39namespace
40{
jimfly01c25411c2018-11-14 17:47:22 +000041
// Sentinel operator index used for synthetic operators that have no entry in the
// flatbuffer operator list (CheckModel below exempts it from the bounds check).
const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

/// Validates that the model is loaded and that subgraphIndex refers to an
/// existing subgraph. Throws ParseException (with the caller's location info,
/// captured via CHECK_SUBGRAPH) on any violation.
void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        // A null model usually means LoadModelFrom*() has not been called yet.
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
}

// Convenience wrapper that captures the call site's function name and file/line.
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
73
74void CheckModel(const TfLiteParser::ModelPtr & model,
75 size_t subgraphIndex,
76 size_t operatorIndex,
77 const CheckLocation & location)
78{
79 if (model.get() == nullptr)
80 {
81 throw ParseException(
82 boost::str(
83 boost::format("%1% was called with invalid (null) model. "
84 "Possible reason is that the model is not yet loaded and Unpack(ed). "
85 "subgraph:%2% operator:%3% at %4%") %
86 location.m_Function %
87 subgraphIndex %
88 operatorIndex %
89 location.FileLine()));
90 }
91 else if (subgraphIndex >= model->subgraphs.size())
92 {
93 throw ParseException(
94 boost::str(
95 boost::format("%1% was called with an invalid subgraph index. "
96 "subgraph:%2% operator:%3% at %4%") %
97 location.m_Function %
98 subgraphIndex %
99 operatorIndex %
100 location.FileLine()));
101 }
102 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
103 operatorIndex != VIRTUAL_OPERATOR_ID)
104 {
105 throw ParseException(
106 boost::str(
107 boost::format("%1% was called with an invalid operator index. "
108 "subgraph:%2% operator:%3% at %4%") %
109 location.m_Function %
110 subgraphIndex %
111 operatorIndex %
112 location.FileLine()));
113 }
114}
115
116#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
117 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
118
119void CheckTensor(const TfLiteParser::ModelPtr & model,
120 size_t subgraphIndex,
121 size_t tensorIndex,
122 const CheckLocation & location)
123{
124 // not checking model, because I assume CHECK_MODEL already run
125 // and checked that. An assert would do.
126 BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
127
128 // also subgraph index should be checked by CHECK_MODEL so
129 // I only add an assert here
130 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
131
132 // the tensor index is the only one to check here
133 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
134 {
135 throw ParseException(
136 boost::str(
137 boost::format("%1% was called with an invalid tensor index. "
138 "subgraph:%2% tensor:%3% at %4%") %
139 location.m_Function %
140 subgraphIndex %
141 tensorIndex %
142 location.FileLine()));
143 }
144}
145
146#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
147 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
148
149void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
150 const CheckLocation & location)
151{
152 if (rawPtr == nullptr)
153 {
154 throw ParseException(
155 boost::str(
156 boost::format("%1% was called with a null tensor pointer. "
157 "at %2%") %
158 location.m_Function %
159 location.FileLine()));
160
161 }
162}
163
164#define CHECK_TENSOR_PTR(TENSOR_PTR) \
165 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
166
167void CheckBuffer(const TfLiteParser::ModelPtr & model,
168 size_t bufferIndex,
169 const CheckLocation & location)
170{
171 if (model.get() == nullptr)
172 {
173 throw ParseException(
174 boost::str(
175 boost::format("%1% was called with invalid (null) model. "
176 "Possible reason is that the model is not yet loaded and Unpack(ed). "
177 "buffer:%2% at %3%") %
178 location.m_Function %
179 bufferIndex %
180 location.FileLine()));
181 }
182 else if (bufferIndex >= model->buffers.size())
183 {
184 throw ParseException(
185 boost::str(
186 boost::format("%1% was called with an invalid buffer index. "
187 "buffer index:%2% at %3%") %
188 location.m_Function %
189 bufferIndex %
190 location.FileLine()));
191 }
192 else if (model->buffers[bufferIndex].get() == nullptr)
193 {
194 throw ParseException(
195 boost::str(
196 boost::format("The buffer #%1% is null. %3%") %
197 bufferIndex %
198 location.AsString()));
199 }
200}
201
202#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
203 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
204
205void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
206 const armnn::TensorInfo & tensorInfo,
207 uint32_t bufferId,
208 const CheckLocation & location)
209{
210 if (bufferPtr == nullptr)
211 {
212 throw ParseException(
213 boost::str(
214 boost::format("BufferPtr is null for buffer:%1%. %2%") %
215 bufferId %
216 location.AsString()));
217 }
218 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
219 tensorInfo.GetNumBytes() > bufferPtr->data.size())
220 {
221 std::stringstream ss;
222 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
223 << "For tensor: " << tensorInfo.GetShape()
224 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
225 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
226 throw ParseException(ss.str());
227 }
228}
229
230#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
231 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
232
233bool IsActivationSupported(tflite::ActivationFunctionType activationType)
234{
235 switch(activationType)
236 {
237 case tflite::ActivationFunctionType_NONE:
238 case tflite::ActivationFunctionType_RELU:
239 case tflite::ActivationFunctionType_RELU6:
240 case tflite::ActivationFunctionType_TANH:
241 {
242 return true;
243 }
244 default:
245 {
246 return false;
247 }
248 }
249}
250
251#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
252 do { \
253 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
254 { \
255 throw ParseException( \
256 boost::str( \
257 boost::format("TfLite parser doesn't suppport fused activation: " \
258 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
259 OPTION->fused_activation_function % \
260 tflite::EnumNameActivationFunctionType(\
261 OPTION->fused_activation_function) % \
262 __func__ % \
263 SUBGRAPH_INDEX % \
264 OPERATOR_INDEX % \
265 CHECK_LOCATION().FileLine())); \
266 } \
267 } while(false)
268
269
270std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
271{
272 std::vector<unsigned int> result;
273 result.reserve(in.size());
274 for (auto & i : in)
275 {
276 result.push_back(CHECKED_NON_NEGATIVE(i));
277 }
278 return result;
279}
280
281void CalcPadding(uint32_t inputSize,
282 uint32_t filterSize,
283 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100284 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100285 uint32_t& paddingFront,
286 uint32_t& paddingBack,
287 tflite::Padding padding)
288{
289 paddingFront = 0;
290 paddingBack = 0;
291 if (padding == tflite::Padding_SAME)
292 {
293 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100294 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
295 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100296 if (temp > inputSize)
297 {
298 paddingFront = (temp - inputSize) / 2;
299 paddingBack = (temp - inputSize) - paddingFront;
300 }
301 }
302}
303
Keith Davis0c2eeac2020-02-11 16:51:50 +0000304armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes,
305 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
telsoa01c577f2c2018-08-31 09:22:23 +0100306{
307 armnn::DataType type;
308 CHECK_TENSOR_PTR(tensorPtr);
309
310 switch (tensorPtr->type)
311 {
312 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000313 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100314 break;
315 case tflite::TensorType_FLOAT32:
316 type = armnn::DataType::Float32;
317 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000318 case tflite::TensorType_INT8:
Ryan OShea03181ff2020-02-07 17:22:22 +0000319 if (tensorPtr->quantization->zero_point.size() == 1 && tensorPtr->quantization->zero_point[0] != 0)
320 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000321 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000322 type = armnn::DataType::QAsymmS8;
323 }
324 else
325 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000326 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000327 type = armnn::DataType::QSymmS8;
328 }
Finn Williamsed66d142019-12-06 09:55:55 +0000329 break;
330 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000331 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000332 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100333 case tflite::TensorType_INT32:
334 type = armnn::DataType::Signed32;
335 break;
336
337 default:
338 {
339 CheckLocation location = CHECK_LOCATION();
340 throw ParseException(
341 boost::str(
342 boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
343 tensorPtr->type %
344 tflite::EnumNameTensorType(tensorPtr->type) %
345 tensorPtr->name %
346 location.AsString()));
347 }
348 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100349 std::vector<unsigned int> safeShape = shapes;
350 if (safeShape.size() == 0)
351 {
352 safeShape.push_back(1);
353 }
354
Keith Davisd305e1a2020-01-22 11:57:54 +0000355 float quantizationScale = 0.0f;
356 int32_t quantizationOffset = 0;
357
358 if (tensorPtr->quantization.get())
359 {
360 if (tensorPtr->quantization->scale.size() <= 1)
361 {
362 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
363 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
364
365 if (tensorPtr->quantization->scale.size() == 1)
366 {
367 quantizationScale = tensorPtr->quantization->scale[0];
368 }
369 if (tensorPtr->quantization->zero_point.size() == 1)
370 {
371 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000372 // but this is what we support at the moment in ArmNN
Keith Davisd305e1a2020-01-22 11:57:54 +0000373 quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
374 }
375
376 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
377 safeShape.data(),
378 type,
379 quantizationScale,
380 quantizationOffset);
381
382 return result;
383 }
384 else
385 {
386 std::vector<float> quantizationScales;
387 std::vector<int32_t> quantizationOffsets;
388
389 // Scale
390 std::copy(tensorPtr->quantization->scale.begin(),
391 tensorPtr->quantization->scale.end(),
392 std::back_inserter(quantizationScales));
393
Keith Davis0c2eeac2020-02-11 16:51:50 +0000394 // QSymmS8 Per-axis
Keith Davisd305e1a2020-01-22 11:57:54 +0000395 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
396 safeShape.data(),
397 type,
398 quantizationScales,
Keith Davis0c2eeac2020-02-11 16:51:50 +0000399 dimensionMappings[boost::numeric_cast<unsigned int>(
400 tensorPtr->quantization->quantized_dimension)]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000401
402 return result;
403 }
404 }
405 else
406 {
407 armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
408 safeShape.data(),
409 type,
410 quantizationScale,
411 quantizationOffset);
412 return result;
413 }
telsoa01c577f2c2018-08-31 09:22:23 +0100414}
415
Keith Davis0c2eeac2020-02-11 16:51:50 +0000416armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
417 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000418{
419 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Keith Davis0c2eeac2020-02-11 16:51:50 +0000420 return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000421}
422
/// Copies the raw bytes of a flatbuffer buffer into freshly allocated storage
/// and wraps them in an armnn::ConstTensor, optionally permuting both the
/// tensorInfo (updated in place) and the data layout with permutationVector.
/// Returns the tensor paired with the owning unique_ptr: the ConstTensor refers
/// to data.get(), so the unique_ptr must outlive the tensor.
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    // tensorPtr is only used in the asserts below; silence unused warnings in
    // release builds where BOOST_ASSERT compiles away.
    boost::ignore_unused(tensorPtr);
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        // Permute the shape metadata and the element data to the mapped layout.
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        // No permutation requested: a straight byte copy suffices.
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
451
telsoa01c577f2c2018-08-31 09:22:23 +0100452armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
453{
454 // generate the binding id by shifting the tensor id by 8 bit
455 // and add the subgraph id, which allows 256 subgraphs
456 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
457}
458
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000459bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
460{
461 const unsigned int actualSize = actual.GetNumDimensions();
462 if (actualSize != expected.size())
463 {
464 return false;
465 }
466
467 for (unsigned int i = 0u; i < actualSize; i++)
468 {
469 if (expected[i] < 0 ||
470 actual[i] != static_cast<unsigned int>(expected[i]))
471 {
472 return false;
473 }
474 }
475
476 return true;
477}
478
telsoa01c577f2c2018-08-31 09:22:23 +0100479} // <anonymous>
480
/// Constructs the parser. The dispatch table m_ParserFunctions is sized to the
/// full builtin-operator range and pre-filled with ParseUnsupportedOperator, so
/// any opcode without an explicit registration below falls through to the
/// unsupported-operator path (throw or StandInLayer, depending on m_Options).
TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParser::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParser::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParser::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParser::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParser::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParser::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParser::ParseUnpack;

    // register supported custom operators (keyed on the flatbuffer custom_code string)
    m_CustomParserFunctions["TFLite_Detection_PostProcess"]      = &TfLiteParser::ParseDetectionPostProcess;
}
526
527void TfLiteParser::ResetParser()
528{
529 m_Network = armnn::INetworkPtr(nullptr, nullptr);
530 m_Model = nullptr;
531 m_SubgraphConnections.clear();
532}
533
/// Inserts a Reshape layer in front of a binary layer so that both inputs have
/// the same rank (broadcast support): the lower-rank input's dimensions are
/// right-aligned into a shape of the higher rank, padded with leading 1s.
/// The reshape feeds the layer's input slot 0; the other input is registered as
/// a direct consumer on slot 1.
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    // Needs a binary operator: both inputs[0] and inputs[1] are read below.
    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    // Ensure reshapedInputId refers to the LOWER-rank tensor (the one that gets
    // reshaped); swap the two ids and infos if input[1] has the lower rank.
    // NOTE(review): after a swap the reshaped input[1] feeds slot 0 and input[0]
    // feeds slot 1, i.e. the operands are exchanged — fine for commutative ops
    // (Add/Mul/Min/Max); confirm callers never use this for Sub/Div.
    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    // Right-align the smaller shape into `numDimensions` slots, leading dims = 1.
    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    // The non-reshaped input connects straight into slot 1 of the target layer.
    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}
590
/// Parses a TfLite flatbuffer model from a file on disk and converts it into an
/// armnn INetwork. Resets any previous parser state first.
INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}
597
598INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
599{
600 ResetParser();
601 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
602 return CreateNetworkFromModel();
603}
604
605INetworkPtr TfLiteParser::CreateNetworkFromModel()
606{
607 m_Network = INetwork::Create();
608 BOOST_ASSERT(m_Model.get() != nullptr);
609
610 bool failedToCreate = false;
611 std::stringstream errors;
612
613 if (m_Model->subgraphs.size() != 1)
614 {
615 throw ParseException(
616 boost::str(
617 boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
618 m_Model->subgraphs.size() %
619 CHECK_LOCATION().AsString()));
620 }
621
622 size_t subgraphIndex = 0;
Derek Lambertiff05cc52019-04-26 13:05:17 +0100623 for (SubgraphPtr const & subgraph : m_Model->subgraphs)
telsoa01c577f2c2018-08-31 09:22:23 +0100624 {
625 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
626
627 size_t operatorIndex = 0;
628 for (OperatorPtr const & op : subgraph->operators)
629 {
630 try
631 {
telsoa01c577f2c2018-08-31 09:22:23 +0100632 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
633 auto builtinCode = opCodePtr->builtin_code;
634
635 if (builtinCode > tflite::BuiltinOperator_MAX)
636 {
637 throw ParseException(
638 boost::str(
639 boost::format("Operator code %1% is out of range 0-%2%. "
640 "subgraph:%3% operator idx:%4%. %5%") %
641 builtinCode %
642 tflite::BuiltinOperator_MAX %
643 subgraphIndex %
644 operatorIndex %
645 CHECK_LOCATION().AsString()));
646 }
647
648 // lookup and call the parser function
649 auto & parserFunction = m_ParserFunctions[builtinCode];
650 (this->*parserFunction)(subgraphIndex, operatorIndex);
651 }
652 catch (const ParseException& e)
653 {
654 failedToCreate = true;
655 std::stringstream errorString;
656
657 errorString << "Failed to parse operator #" << operatorIndex
658 << " within subgraph #" << subgraphIndex
659 << " error: " << e.what();
Derek Lamberti08446972019-11-26 16:38:31 +0000660 ARMNN_LOG(error) << errorString.str();
telsoa01c577f2c2018-08-31 09:22:23 +0100661
662 errors << errorString.str() << "\n";
663 }
664 ++operatorIndex;
665 }
666
667 SetupInputLayers(subgraphIndex);
668 SetupOutputLayers(subgraphIndex);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -0200669 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100670
671 ++subgraphIndex;
672 }
673
674 if (failedToCreate)
675 {
676 // we can skip everything and let the outer exception handler deal with the error
677 throw ParseException(errors.str());
678 }
679
680 // establish the connections from the layer outputs to the inputs of the subsequent layers
681 for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
682 {
683 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
684 {
685 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
686 {
687 for (size_t inputSlotIdx = 0;
688 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
689 ++inputSlotIdx)
690 {
691 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
692 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
693 }
694 }
695 }
696 }
697
698 return std::move(m_Network);
699}
700
701void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
702 size_t tensorIndex,
703 armnn::IOutputSlot* slot)
704{
705 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
706 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
707 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
708
709 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
710
711 // assuming there is only one producer for that tensor
712 if (tensorSlots.outputSlot != nullptr)
713 {
714 throw ParseException(boost::str(
715 boost::format("Another layer has already registered itself as the producer of "
716 "subgraph:%1% tensor:%2% %3%") %
717 subgraphIndex %
718 tensorIndex %
719 CHECK_LOCATION().AsString()));
720 }
721
722 tensorSlots.outputSlot = slot;
723}
724
725void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
726 size_t tensorIndex,
727 armnn::IInputSlot* slot)
728{
729 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
730 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
731 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
732
733 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
734 tensorSlots.inputSlots.push_back(slot);
735}
736
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100737void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
738{
739 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
740
741 // NOTE: By default we presume the custom operator is not supported
742 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
743
744 // Identify custom code defined for custom operator
745 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
746 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
747
748 // Find parser function that correspondes to custom code (if any)
749 auto iterator = m_CustomParserFunctions.find(customCode);
750 if (iterator != m_CustomParserFunctions.end())
751 {
752 customParserFunction = iterator->second;
753 }
754
755 // Run parser function
756 (this->*customParserFunction)(subgraphIndex, operatorIndex);
757}
758
/// Fallback handler for operators the parser has no implementation for.
/// If the m_StandInLayerForUnsupported option is not set, throws ParseException;
/// otherwise inserts a non-executable StandInLayer with matching input/output
/// counts so the rest of the graph can still be constructed.
void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
            boost::str(
                boost::format("Operator not supported. "
                              "subgraph:%1% operator:%2% "
                              "opcode_index:%3% opcode:%4% / %5% %6%") %
                subgraphIndex %
                operatorIndex %
                opcodeIndex %
                opcode %
                tflite::EnumNameBuiltinOperator(opcode) %
                CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());

    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        // Propagate the flatbuffer-declared output tensor infos onto the stand-in.
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
    }

    auto inputTensorIds  = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}
806
telsoa01c577f2c2018-08-31 09:22:23 +0100807void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
808{
809 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
810
811 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
812 const auto * options = operatorPtr->builtin_options.AsConv2DOptions();
813
814 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
815
816 Convolution2dDescriptor desc;
817 desc.m_BiasEnabled = false;
818 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
819 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000820 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100821 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
822 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000823
telsoa01c577f2c2018-08-31 09:22:23 +0100824 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
825 CHECK_VALID_SIZE(inputs.size(), 2, 3);
826
827 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
828 CHECK_VALID_SIZE(outputs.size(), 1);
829
830 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
831 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
832
833 // assuming input is NHWC
834 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
835 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
836
837 // assuming the filter is OHWI : Output, H, W, Input
838 // which is essentially the same as NHWC
839 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
840 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
841
Pablo Tellof0bd6832019-04-26 17:58:13 +0100842 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
843 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
844 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
845 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100846
Matteo Martincigh747ef822018-12-18 09:26:39 +0000847 auto filterTensorAndData = CreateConstTensor(inputs[1],
848 filterTensorInfo,
849 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100850 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100851
852 auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
853
854 if (inputs.size() == 3)
855 {
856 desc.m_BiasEnabled = true;
857 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000858 auto biasTensorAndData = CreateConstTensor(inputs[2],
859 biasTensorInfo,
860 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100861 layer = m_Network->AddConvolution2dLayer(desc,
862 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100863 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100864 layerName.c_str());
865 }
866 else
867 {
868 layer = m_Network->AddConvolution2dLayer(desc,
869 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100870 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100871 layerName.c_str());
872 }
873
874 BOOST_ASSERT(layer != nullptr);
875
telsoa01c577f2c2018-08-31 09:22:23 +0100876 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000877 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100878
879 // register the input connection slots for the layer, connections are made after all layers have been created
880 // only the tensors for the inputs are relevant, exclude the const tensors
881 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000882 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100883
jimfly01c25411c2018-11-14 17:47:22 +0000884 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100885 // register the output connection slots for the layer, connections are made after all layers have been created
886 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
887 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
888}
889
890void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
891{
892 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
893
894 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
895 const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
896
897 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
898
899 DepthwiseConvolution2dDescriptor desc;
900 desc.m_BiasEnabled = false;
901 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
902 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000903 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matthew Jacksond6a9dee2019-07-22 13:53:24 +0100904 CHECKED_NON_NEGATIVE(options->depth_multiplier);
telsoa01c577f2c2018-08-31 09:22:23 +0100905
906 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
907 CHECK_VALID_SIZE(inputs.size(), 2, 3);
908 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
909 CHECK_VALID_SIZE(outputs.size(), 1);
Pablo Tellof0bd6832019-04-26 17:58:13 +0100910 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
911 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000912
Keith Davis0c2eeac2020-02-11 16:51:50 +0000913 // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
914 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
915
telsoa01c577f2c2018-08-31 09:22:23 +0100916 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Keith Davis0c2eeac2020-02-11 16:51:50 +0000917 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1], permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100918
Matteo Martincigh747ef822018-12-18 09:26:39 +0000919 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +0100920 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
921 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000922
923 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +0100924 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
925 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
926
Matteo Martincigh747ef822018-12-18 09:26:39 +0000927 // Reshape weights as [ H, W, I, M ]
928 filterTensorInfo.SetShape({ filterHeight,
929 filterWidth,
930 inputTensorInfo.GetShape()[3],
931 filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });
932
Pablo Tellof0bd6832019-04-26 17:58:13 +0100933 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
934 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
935 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
936 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100937
Matteo Martincigh747ef822018-12-18 09:26:39 +0000938 auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100939 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100940 auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
941
942 if (inputs.size() == 3)
943 {
944 desc.m_BiasEnabled = true;
945 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000946 auto biasTensorAndData = CreateConstTensor(inputs[2],
947 biasTensorInfo,
948 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100949 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
950 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100951 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100952 layerName.c_str());
953 }
954 else
955 {
956 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
957 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100958 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100959 layerName.c_str());
960 }
961 BOOST_ASSERT(layer != nullptr);
962
telsoa01c577f2c2018-08-31 09:22:23 +0100963 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
jimfly01c25411c2018-11-14 17:47:22 +0000964 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100965
966 // register the input connection slots for the layer, connections are made after all layers have been created
967 // only the tensors for the inputs are relevant, exclude the const tensors
968 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000969 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100970
jimfly01c25411c2018-11-14 17:47:22 +0000971 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100972 // register the output connection slots for the layer, connections are made after all layers have been created
973 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
974 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
975}
976
Finn Williamsed66d142019-12-06 09:55:55 +0000977void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
978{
979 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
980
981 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
982 CHECK_VALID_SIZE(inputs.size(), 1);
983
984 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
985 CHECK_VALID_SIZE(outputs.size(), 1);
986
987 auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
988
989 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
990 BOOST_ASSERT(layer != nullptr);
991
992 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
993 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
994
995 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
996 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
997
998 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
999 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1000}
1001
Keith Davis4cd29a02019-09-09 14:49:20 +01001002void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
1003{
1004 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1005
1006 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001007 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001008
1009 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1010 CHECK_VALID_SIZE(outputs.size(), 1);
1011
1012 armnn::IConnectableLayer* layer = nullptr;
1013 auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);
1014
1015 PermuteDescriptor desc;
1016
josh minorba424d22019-11-13 10:55:17 -06001017 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001018 {
1019 armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
1020 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001021 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1022 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001023 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
1024
josh minorba424d22019-11-13 10:55:17 -06001025 // permuteShape assumes Tf/Np permute vectors, we must translate to armnn expected form
1026 // to do so we find the perm vector which would invert what a tf perm vector would do (ex 3,0,1,2 -> 1,2,3,0)
1027 std::vector<unsigned int> armnnPermuteShape(numPermVecElements);
1028 std::vector<unsigned int>::iterator it;
1029 for (unsigned int i = 0u; i < numPermVecElements; ++i)
1030 {
1031 it = std::find(permuteShape.begin(), permuteShape.end(), i);
1032 armnnPermuteShape[i] = static_cast<unsigned int>(std::distance(permuteShape.begin(), it));
1033 }
Kevin May85d92602019-09-27 17:21:06 +01001034
josh minorba424d22019-11-13 10:55:17 -06001035 PermutationVector permutationVector(armnnPermuteShape.data(), permuteTensorInfo.GetNumElements());
1036
1037 desc = PermuteDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001038 }
1039
Keith Davis4cd29a02019-09-09 14:49:20 +01001040 layer = m_Network->AddPermuteLayer(desc, layerName.c_str());
1041
1042 BOOST_ASSERT(layer != nullptr);
1043
1044 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1045 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1046
1047 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1048 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1049
1050 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1051 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1052}
1053
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001054void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
1055{
1056 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1057
1058 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1059 const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();
1060
1061 TransposeConvolution2dDescriptor desc;
1062 desc.m_BiasEnabled = false;
1063 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1064 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1065 desc.m_DataLayout = armnn::DataLayout::NHWC;
1066
1067 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001068 CHECK_VALID_SIZE(inputs.size(), 3);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001069
1070 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1071 CHECK_VALID_SIZE(outputs.size(), 1);
1072
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001073 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001074 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1075
1076 // TfLite uses NHWC tensors
1077 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1078 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1079
1080 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1081 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1082
1083 CalcPadding(inputHeight,
1084 filterHeight,
1085 desc.m_StrideY,
1086 1, // DilationY
1087 desc.m_PadTop,
1088 desc.m_PadBottom,
1089 options->padding);
1090
1091 CalcPadding(inputWidth,
1092 filterWidth,
1093 desc.m_StrideX,
1094 1, // DilationX
1095 desc.m_PadLeft,
1096 desc.m_PadRight,
1097 options->padding);
1098
1099 auto filterTensorAndData = CreateConstTensor(inputs[1],
1100 filterTensorInfo,
1101 armnn::Optional<armnn::PermutationVector&>());
1102
1103 armnn::IConnectableLayer* layer = nullptr;
1104 auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);
1105
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001106 layer = m_Network->AddTransposeConvolution2dLayer(desc,
1107 filterTensorAndData.first,
1108 EmptyOptional(),
1109 layerName.c_str());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001110
1111 BOOST_ASSERT(layer != nullptr);
1112
1113 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1114 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1115
1116 // only the tensors for the inputs are relevant, exclude the const (filter) tensor
1117 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001118 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001119
1120 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1121 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1122}
1123
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001124void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
1125{
1126 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
1127}
1128
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001129void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
1130{
1131 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1132
1133 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1134 CHECK_VALID_SIZE(inputs.size(), 3);
1135
1136 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1137 CHECK_VALID_SIZE(outputs.size(), 1);
1138
1139 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1140 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1141
1142 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1143 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1144
1145 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1146 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1147
1148 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1149 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1150
1151 size_t step = 2;
1152 std::vector<std::pair<unsigned int, unsigned int>> crops;
1153 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1154 {
1155 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1156 }
1157
1158 armnn::BatchToSpaceNdDescriptor desc;
1159 desc.m_BlockShape = blockShape;
1160 desc.m_Crops = crops;
1161 desc.m_DataLayout = armnn::DataLayout::NHWC;
1162
1163 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1164
1165 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
1166 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1167
1168 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1169
1170 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1171 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1172
1173 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1174 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1175}
1176
Matthew Jackson28c94572019-07-18 10:47:03 +01001177void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1178{
1179 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1180
1181 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1182 CHECK_VALID_SIZE(inputs.size(), 1);
1183
1184 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1185 CHECK_VALID_SIZE(outputs.size(), 1);
1186
1187 L2NormalizationDescriptor desc;
1188 desc.m_DataLayout = armnn::DataLayout::NHWC;
1189 auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1190 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1191
1192 BOOST_ASSERT(layer != nullptr);
1193
1194 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1195 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1196
1197 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1198 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1199
1200 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1201 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1202}
1203
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001204void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
1205{
1206 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
1207}
1208
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001209void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1210{
1211 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1212
1213 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1214 CHECK_VALID_SIZE(inputs.size(), 2);
1215
1216 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1217 CHECK_VALID_SIZE(outputs.size(), 1);
1218
1219 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1220 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1221
1222 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1223 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1224
1225 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1226 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1227
1228 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1229 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1230 {
1231 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1232 }
1233 else
1234 {
1235 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1236 }
1237
1238 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1239 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1240}
1241
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001242void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1243{
1244 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1245
1246 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1247 CHECK_VALID_SIZE(inputs.size(), 2);
1248
1249 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1250 CHECK_VALID_SIZE(outputs.size(), 1);
1251
1252 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1253 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1254
1255 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1256 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1257
1258 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1259 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1260
1261 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1262 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1263 {
1264 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1265 }
1266 else
1267 {
1268 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1269 }
1270
1271 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1272 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1273}
1274
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001275void TfLiteParser::ParsePool(size_t subgraphIndex,
1276 size_t operatorIndex,
1277 PoolingAlgorithm algorithm)
1278{
1279 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1280
1281 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1282 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1283
1284 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1285
1286 std::string layerName;
1287
1288 switch (algorithm)
1289 {
1290 case PoolingAlgorithm::Average:
1291 layerName =
1292 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1293 break;
1294 case PoolingAlgorithm::Max:
1295 layerName =
1296 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1297 break;
1298 default:
1299 BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
1300 }
1301
1302 Pooling2dDescriptor desc;
1303
1304 desc.m_PoolType = algorithm;
1305 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1306 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1307 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1308 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1309 desc.m_PaddingMethod = PaddingMethod::Exclude;
1310 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001311 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001312
1313 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1314 CHECK_VALID_SIZE(inputs.size(), 1);
1315 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1316
1317 // assuming input is NHWC
1318 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1319 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1320
Pablo Tellof0bd6832019-04-26 17:58:13 +01001321 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1322 desc.m_PadTop, desc.m_PadBottom, options->padding);
1323 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1324 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001325
1326 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1327 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001328
1329 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1330
1331 BOOST_ASSERT(layer != nullptr);
1332
jimfly01c25411c2018-11-14 17:47:22 +00001333 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1334 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001335
1336 // register the input connection slots for the layer, connections are made after all layers have been created
1337 // only the tensors for the inputs are relevant, exclude the const tensors
1338 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001339 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001340
jimfly01c25411c2018-11-14 17:47:22 +00001341 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001342 // register the output connection slots for the layer, connections are made after all layers have been created
1343 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1344 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1345}
1346
josh minorba424d22019-11-13 10:55:17 -06001347void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
1348{
1349 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1350
1351 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1352 CHECK_VALID_SIZE(inputs.size(), 3);
1353 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1354 CHECK_VALID_SIZE(outputs.size(), 1);
1355
1356 SliceDescriptor desc;
1357
1358 // set begin tensor info for slice descriptor
1359 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1360 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1361
1362 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1363 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1364
1365 // set size tensor info for slice descriptor
1366 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1367 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1368
1369 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1370 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1371 desc = SliceDescriptor(begin, size);
1372
1373 auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
1374 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
1375
1376 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1377 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1378
1379 // register the input connection slots for the layer, connections are made after all layers have been created
1380 // only the tensors for the inputs are relevant, exclude the const tensors
1381 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1382 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1383
1384 // register the output connection slots for the layer, connections are made after all layers have been created
1385 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1386 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1387}
1388
telsoa01c577f2c2018-08-31 09:22:23 +01001389void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1390{
1391 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1392 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1393 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1394
1395 SoftmaxDescriptor desc;
1396 desc.m_Beta = options->beta;
1397
1398 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1399 CHECK_VALID_SIZE(inputs.size(), 1);
1400 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1401 CHECK_VALID_SIZE(outputs.size(), 1);
1402
1403 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1404 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1405
1406 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1407 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1408
1409 // register the input connection slots for the layer, connections are made after all layers have been created
1410 // only the tensors for the inputs are relevant, exclude the const tensors
1411 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1412 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1413
1414 // register the output connection slots for the layer, connections are made after all layers have been created
1415 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1416 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1417}
1418
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001419void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1420{
1421 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1422
1423 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1424 CHECK_VALID_SIZE(inputs.size(), 3);
1425
1426 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1427 CHECK_VALID_SIZE(outputs.size(), 1);
1428
1429 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1430 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1431
1432 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1433 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1434
1435 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1436 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1437
1438 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1439 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1440
1441 size_t step = 2;
1442 std::vector<std::pair<unsigned int, unsigned int>> padList;
1443 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1444 {
1445 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1446 }
1447
1448 armnn::SpaceToBatchNdDescriptor desc;
1449 desc.m_BlockShape = blockShape;
1450 desc.m_PadList = padList;
1451 desc.m_DataLayout = armnn::DataLayout::NHWC;
1452
1453 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1454
1455 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1456 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1457
1458 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1459
1460 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1461 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1462
1463 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1464 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1465}
1466
telsoa01c577f2c2018-08-31 09:22:23 +01001467armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1468 const armnn::TensorInfo & inputTensorInfo)
1469{
1470 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1471 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1472 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1473
1474 if (inputTensorInfo.GetNumDimensions() > 4)
1475 {
1476 std::stringstream ss;
1477 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1478 << " shape:" << inputTensorInfo.GetShape() << " "
1479 << CHECK_LOCATION().AsString();
1480 throw ParseException(ss.str());
1481 }
1482
1483 if (squeezeDims.empty())
1484 {
1485 squeezeDims.assign(dimensionSequence,
1486 dimensionSequence+inputTensorInfo.GetNumDimensions());
1487 }
1488
1489 std::vector<uint32_t> outputDims;
1490 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1491 {
1492 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1493 auto currentDimension = inputTensorInfo.GetShape()[i];
1494 if (skipSqueeze || currentDimension != 1)
1495 {
1496 outputDims.push_back(currentDimension);
1497 }
1498 }
1499
1500 if (outputDims.size() > 4)
1501 {
1502 std::stringstream ss;
1503 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1504 << " shape:" << inputTensorInfo.GetShape() << " "
1505 << CHECK_LOCATION().AsString();
1506 throw ParseException(ss.str());
1507 }
1508
1509 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1510 outputDims.data());
1511
1512 // we need to preserve the tensor type and the quantization data as well
1513 TensorInfo outTensorInfo = inputTensorInfo;
1514 outTensorInfo.SetShape(outShape);
1515
1516 return outTensorInfo;
1517}
1518
1519void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1520{
1521 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1522
1523 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1524 CHECK_VALID_SIZE(inputs.size(), 1);
1525
1526 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1527 CHECK_VALID_SIZE(outputs.size(), 1);
1528
1529 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1530 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1531
1532 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1533 armnn::TensorInfo outputTensorInfo =
1534 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1535 inputTensorInfo);
1536
1537 ReshapeDescriptor reshapeDesc;
1538 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1539
1540 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1541 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1542 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1543
1544 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1545 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1546
1547 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1548 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1549}
1550
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001551void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1552{
1553 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1554
1555 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1556 CHECK_VALID_SIZE(inputs.size(), 4);
1557
1558 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1559 CHECK_VALID_SIZE(outputs.size(), 1);
1560
1561 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1562 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1563
1564 StridedSliceDescriptor desc;
1565 desc.m_BeginMask = options->begin_mask;
1566 desc.m_EllipsisMask = options->ellipsis_mask;
1567 desc.m_EndMask = options->end_mask;
1568 desc.m_NewAxisMask = options->new_axis_mask;
1569 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1570 desc.m_DataLayout = armnn::DataLayout::NHWC;
1571
1572 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1573 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1574
1575 std::vector<int> begin(beginTensorInfo.GetNumElements());
1576 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1577
1578 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1579 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1580
1581 std::vector<int> end(endTensorInfo.GetNumElements());
1582 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1583
1584 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1585 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1586
1587 std::vector<int> stride(strideTensorInfo.GetNumElements());
1588 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1589
1590 desc.m_Begin = begin;
1591 desc.m_End = end;
1592 desc.m_Stride = stride;
1593
1594 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1595 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1596
1597 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1598 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1599
1600 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1601 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1602
1603 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1604 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1605}
1606
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001607void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1608{
1609 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1610
1611 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1612 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1613
1614 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1615 CHECK_VALID_SIZE(inputs.size(), 2);
1616
1617 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1618 CHECK_VALID_SIZE(outputs.size(), 1);
1619
1620 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1621 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1622
1623 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1624 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1625
1626 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1627 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1628
1629 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1630 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1631 {
1632 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1633 }
1634 else
1635 {
1636 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1637 }
1638
1639 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1640
1641 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1642 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1643}
1644
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001645void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1646{
1647 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1648
1649 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1650 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1651
1652 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1653 CHECK_VALID_SIZE(inputs.size(), 2);
1654
1655 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1656 CHECK_VALID_SIZE(outputs.size(), 1);
1657
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001658 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1659 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1660
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001661 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1662 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1663
1664 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1665 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1666
1667 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001668 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1669 {
1670 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1671 }
1672 else
1673 {
1674 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1675 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001676
1677 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1678
1679 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1680 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1681}
1682
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001683void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1684{
1685 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1686
1687 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1688 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1689
1690 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1691 CHECK_VALID_SIZE(inputs.size(), 2);
1692
1693 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1694 CHECK_VALID_SIZE(outputs.size(), 1);
1695
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001696 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1697 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1698
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001699 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1700 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1701
1702 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1703 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1704
1705 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001706 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1707 {
1708 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1709 }
1710 else
1711 {
1712 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1713 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001714
1715 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1716
1717 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1718 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1719}
1720
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001721void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1722{
1723 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1724
1725 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1726
1727 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1728 CHECK_VALID_SIZE(outputs.size(), 1);
1729
1730 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1731 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1732
1733 armnn::MeanDescriptor desc;
1734 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1735 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1736 desc.m_Axis = axis;
1737
1738 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1739 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1740
1741 desc.m_KeepDims =
1742 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1743 true : false;
1744
1745 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1746 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1747
1748 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1749
1750 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1751 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1752
1753 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1754 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1755}
1756
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001757void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1758{
1759 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1760
1761 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1762
1763 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1764 CHECK_VALID_SIZE(outputs.size(), 1);
1765
1766 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1767 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1768
1769 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1770 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1771
1772 size_t step = 2;
1773 armnn::PadDescriptor desc;
1774 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1775 {
1776 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1777 }
1778
1779 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1780 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1781
1782 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1783 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1784
1785 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1786 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1787
1788 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1789 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1790}
1791
Sadik Armagan66dedc72019-12-10 16:32:07 +00001792void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
1793{
1794 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1795
1796 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1797 CHECK_VALID_SIZE(inputs.size(), 1);
1798
1799 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1800 CHECK_VALID_SIZE(outputs.size(), 1);
1801
1802 auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
1803
1804 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
1805 BOOST_ASSERT(layer != nullptr);
1806
1807 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1808 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1809
1810 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1811 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1812
1813 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1814 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1815}
Finn Williamsc42c3842019-01-22 14:18:11 +00001816
Sadik Armagan58f39192018-09-17 14:14:39 +01001817void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1818{
Finn Williamsc42c3842019-01-22 14:18:11 +00001819 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001820}
1821
1822void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1823{
Finn Williamsc42c3842019-01-22 14:18:11 +00001824 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1825}
Sadik Armagan58f39192018-09-17 14:14:39 +01001826
Finn Williamsc42c3842019-01-22 14:18:11 +00001827void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1828{
1829 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1830}
1831
Nina Drozd99851762019-04-09 09:37:38 +01001832void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1833{
1834 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1835}
1836
Finn Williamsc42c3842019-01-22 14:18:11 +00001837
1838void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1839{
1840 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001841 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1842 boost::ignore_unused(operatorPtr);
1843
1844 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1845 CHECK_VALID_SIZE(inputs.size(), 1);
1846
1847 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1848 CHECK_VALID_SIZE(outputs.size(), 1);
1849
Finn Williamsc42c3842019-01-22 14:18:11 +00001850 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001851 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001852 activationDesc.m_Function = activationType;
1853
1854 switch (activationType)
1855 {
1856 case ActivationFunction::ReLu:
1857 {
1858 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1859 break;
1860 }
1861 case ActivationFunction::BoundedReLu:
1862 {
1863 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1864 activationDesc.m_A = 6.0f;
1865 activationDesc.m_B = 0.0f;
1866 break;
1867 }
1868 case ActivationFunction::Sigmoid:
1869 {
1870 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1871 break;
1872 }
Nina Drozd99851762019-04-09 09:37:38 +01001873 case ActivationFunction::TanH:
1874 {
1875 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1876 activationDesc.m_A = 1.0f;
1877 activationDesc.m_B = 1.0f;
1878 break;
1879 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001880 default:
1881 {
1882 throw ParseException(
1883 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1884 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1885 }
1886 }
1887
1888 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001889
1890 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1891 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1892
1893 // register the input connection slots for the layer, connections are made after all layers have been created
1894 // only the tensors for the inputs are relevant, exclude the const tensors
1895 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1896 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1897
1898 // register the output connection slots for the layer, connections are made after all layers have been created
1899 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1900 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1901}
/// Computes the tensor info produced by reshaping @p inputTensorInfo to
/// @p targetDimsIn, resolving at most one -1 ("stretch") entry from the
/// input's element count. Data type and quantization are preserved.
/// @throws ParseException if more than one target dimension is -1.
armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
                                                     const std::vector<int32_t> & targetDimsIn)
{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        // Only a single stretch dimension is well-defined.
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                boost::str(
                    boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
        }

        // Seeding the product with -1 cancels the single -1 entry in the list,
        // so the accumulate yields the (positive) element count of the fixed
        // dimensions; numeric_cast would throw if the result were negative.
        auto targetNumElements =
            boost::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        // Replace the -1 entry with the inferred extent.
        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    // Preserve the tensor type and quantization data; only the shape changes.
    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
1932
1933void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1934{
1935 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1936
1937 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001938
1939 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1940 CHECK_VALID_SIZE(outputs.size(), 1);
1941
1942 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1943 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1944
1945 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001946 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1947 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001948 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1949
kevmay0171972a82018-12-17 14:28:03 +00001950 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001951 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1952 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001953 {
1954 std::stringstream ss;
1955 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001956 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001957 << " does not equal output shape "
1958 << actualOutputTensorInfo.GetShape()
1959 << ": "
1960 << CHECK_LOCATION().AsString();
1961 throw ParseException(ss.str());
1962 }
1963
Sadikb94967b2018-09-19 15:30:00 +01001964 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001965 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001966
1967 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1968 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001969 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001970
1971 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1972 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1973
1974 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1975 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1976}
1977
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001978void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1979{
Sadik Armagana3b31f02019-12-05 09:08:53 +00001980 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
1981}
1982
1983void TfLiteParser::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
1984{
1985 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
1986}
1987
1988void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
1989{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001990 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1991
1992 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1993 CHECK_VALID_SIZE(inputs.size(), 2);
1994
1995 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1996 CHECK_VALID_SIZE(outputs.size(), 1);
1997
1998 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1999
2000 // Data for the parsed tensor args (size) must be stored locally.
2001 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2002
2003 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2004 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2005
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002006 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002007 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002008 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002009 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2010 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002011
Sadik Armagana3b31f02019-12-05 09:08:53 +00002012 auto layerName = str(boost::format("Resize:"));
2013
2014 switch (resizeMethod)
2015 {
2016 case ResizeMethod::Bilinear:
2017 {
2018 layerName += str(boost::format("BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002019
2020 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2021 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2022
2023 desc.m_BilinearAlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002024 break;
2025 }
2026 case ResizeMethod::NearestNeighbor:
2027 {
2028 layerName += str(boost::format("NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
2029 break;
2030 }
2031 default:
2032 {
2033 throw ParseException(
2034 boost::str(boost::format("Unexpected ResizeMethod[%1%] when creating layerName "
2035 " %2% ") %static_cast<int>(resizeMethod)% CHECK_LOCATION().AsString()));
2036 }
2037 }
2038
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002039 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002040
2041 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2042 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2043
2044 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2045 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2046
2047 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2048 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2049}
2050
/// Parses a TfLite CONCATENATION operator into an ArmNN Concat layer, plus an
/// optional trailing activation layer when the op has a fused activation.
void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // One concat "view" per input tensor; all inputs share the rank of input 0.
    unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
    uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();

    // Normalize a possibly-negative TfLite axis into [0, inputRank).
    const unsigned int concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

    // Running offset along the concat axis; advanced by each iteration below,
    // so the loop order matters.
    unsigned int mergeDimOrigin = 0;

    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);

        // This set up concatDescriptor view origin
        armnnUtils::ProcessConcatInputTensorInfo(
            inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // All input tensor ids are registered (concat has no const parameter tensors).
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});

    // add fused activation layer
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2102
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002103void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
2104{
2105 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2106
2107 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2108 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2109
2110 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2111
2112 FullyConnectedDescriptor desc;
2113 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002114 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002115
2116 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2117 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2118 CHECK_VALID_SIZE(outputs.size(), 1);
2119
2120 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2121
2122 // Fully Connected Layer accepts two dimensional weights input
2123 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2124 if (weightsDimension != 2)
2125 {
2126 throw ParseException(
2127 boost::str(
2128 boost::format(
2129 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
2130 "Node %2%")
2131 % weightsDimension
2132 % CHECK_LOCATION().AsString()));
2133 }
2134
Matteo Martincigh747ef822018-12-18 09:26:39 +00002135 auto filterTensorAndData = CreateConstTensor(inputs[1],
2136 filterTensorInfo,
2137 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002138 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002139 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
2140
2141 if (inputs.size() == 3)
2142 {
2143 desc.m_BiasEnabled = true;
2144 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002145 auto biasTensorAndData = CreateConstTensor(inputs[2],
2146 biasTensorInfo,
2147 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002148 layer = m_Network->AddFullyConnectedLayer(desc,
2149 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002150 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002151 layerName.c_str());
2152 }
2153 else
2154 {
2155 layer = m_Network->AddFullyConnectedLayer(desc,
2156 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002157 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002158 layerName.c_str());
2159 }
2160 BOOST_ASSERT(layer != nullptr);
2161
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002162 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2163
2164 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2165
2166 if (inputTensorInfo.GetNumDimensions() > 2)
2167 {
2168 // Add reshape to flatten to 2D [batch_size, input_size],
2169 // where "input_size" corresponds to the number of inputs to the layer,
2170 // matching the second dimension of weights,
2171 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2172 std::vector<unsigned int> reshapedDimensions(2);
2173 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2174 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2175
2176 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2177 {
2178 throw ParseException(
2179 boost::str(
2180 boost::format(
2181 "Failed to deduce input tensor shape from filter size %1%")
2182 % reshapedDimensions[1]
2183 % CHECK_LOCATION().AsString()));
2184 }
2185
2186 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2187 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2188
2189 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2190 armnn::ReshapeDescriptor desc;
2191 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2192 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2193
2194 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2195 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2196
2197 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2198 }
2199 else
2200 {
2201 // register the input connection slot for the layer
2202 // only the tensors for the inputs are relevant, exclude the const tensors
2203 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2204 }
2205
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002206 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2207 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2208
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002209 // we need to add the activation layer and fortunately we don't need to care about the data layout
2210 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2211 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002212
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002213 // register the output connection slots for the layer, connections are made after all layers have been created
2214 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2215 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2216}
2217
void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    // Parses the TfLite custom operator TFLite_Detection_PostProcess into an ArmNN
    // DetectionPostProcess layer. Its parameters arrive as a flexbuffers blob in
    // custom_options rather than as builtin options.
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    // Four outputs are registered below: boxes, classes, scores, num detections.
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two keys are optional; the descriptor's defaults are kept when absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    // IoU threshold must lie in (0, 1] to be meaningful for NMS.
    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Input 2 holds the anchor boxes as a constant tensor fed into the layer itself.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
                                                                       layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1 });

    // Apply the overridden shapes to each of the four output slots.
    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
2295
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002296/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2297void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2298{
2299 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2300
2301 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2302 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2303 CHECK_VALID_SIZE(outputs.size(), 1);
2304
2305 if (inputs.size() < 1)
2306 {
2307 throw ParseException("Pack must have at least one input.");
2308 }
2309
2310 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2311 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2312
2313 StackDescriptor desc;
2314 desc.m_Axis = static_cast<uint32_t>(options->axis);
2315 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2316
2317 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2318 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2319 desc.m_InputShape = inputTensorInfo.GetShape();
2320
2321 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2322 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2323
2324 BOOST_ASSERT(layer != nullptr);
2325
2326 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2327 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2328
2329 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2330 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2331
2332 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2333 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2334}
2335
Nina Drozd200e3802019-04-15 09:47:39 +01002336void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2337{
2338 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2339
2340 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2341 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2342
2343 // This unpackAxis indicates the axis to unpack
2344 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2345
2346 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2347 CHECK_VALID_SIZE(inputs.size(), 1);
2348
2349 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002350
2351 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2352 {
2353 throw ParseException(
2354 boost::str(
2355 boost::format(
2356 "The unpack axis: %1% cannot be greater than or equal to "
2357 "the number of input dimension %2% %3%")
2358 % unpackAxis
2359 % inputTensorInfo.GetNumDimensions()
2360 % CHECK_LOCATION().AsString()));
2361 }
2362
Nina Drozd200e3802019-04-15 09:47:39 +01002363 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2364 // If num is not defined, automatically infer from the length of the dimension axis.
2365 if(unpackNum == 0)
2366 {
2367 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2368 }
2369
2370 // If unpack number cannot be inferred and is still zero, throw ParseException.
2371 if(unpackNum == 0)
2372 {
2373 throw ParseException("Number to unpack must greater than zero.");
2374 }
2375
2376 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2377 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2378
2379 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2380 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2381
2382 // Add current input shape to unpackDimSizes
2383 for (unsigned int i = 0; i < inputDimSize; ++i)
2384 {
2385 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2386 }
2387
2388 if (unpackDimSizes[unpackAxis] != unpackNum)
2389 {
2390 throw ParseException("Number to unpack must be the same as length of the dimension to "
2391 "unpack along.");
2392 }
2393
2394 unpackDimSizes[unpackAxis] /= unpackNum;
2395
2396 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2397 for (unsigned int j = 0; j < unpackNum; ++j)
2398 {
2399 // Set the size of the views.
2400 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2401 {
2402 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2403 }
2404 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2405 }
2406
2407 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2408 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2409
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002410 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2411 unpackDimSizes.data());
2412
Nina Drozd200e3802019-04-15 09:47:39 +01002413 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2414 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2415
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002416 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2417 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2418 {
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002419 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002420 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2421 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002422 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002423 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2424
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002425 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2426 outputTensorInfo.GetDataType(),
2427 outputTensorInfo.GetQuantizationScale(),
2428 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002429 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2430
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002431 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002432
2433 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2434 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2435 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2436 }
Nina Drozd200e3802019-04-15 09:47:39 +01002437}
2438
Nina Drozd0324f482019-04-08 10:52:10 +01002439void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2440{
2441 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2442
2443 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2444 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2445
2446 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2447
Nina Drozd200e3802019-04-15 09:47:39 +01002448 // If number of splits cannot be inferred and is zero, throw ParseException.
2449 if(numSplits == 0)
2450 {
2451 throw ParseException("Number to splits must greater than zero.");
2452 }
2453
Nina Drozd0324f482019-04-08 10:52:10 +01002454 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2455 CHECK_VALID_SIZE(inputs.size(), 2);
2456 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2457 CHECK_VALID_SIZE(outputs.size(), numSplits);
2458
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002459 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2460 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002461
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002462 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2463 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2464 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2465
2466 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2467 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002468
2469 // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
2470 if (splitDim == 0 || splitDim == 2)
2471 {
2472 throw ParseException(
2473 boost::str(
2474 boost::format(
2475 "Dimension %1% for split is not supported by Armnn. %2%")
2476 % splitDim
2477 % CHECK_LOCATION().AsString()));
2478 }
2479
2480 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002481 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002482 {
2483 throw ParseException(
2484 boost::str(
2485 boost::format(
2486 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002487 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002488 % inputTensorInfo.GetNumDimensions()
2489 % MaxNumOfTensorDimensions
2490 % CHECK_LOCATION().AsString()));
2491 }
2492
2493 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2494
2495 // Add current input shape to splitterDimSizes
2496 for (unsigned int i = 0; i < inputDimSize; ++i)
2497 {
2498 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2499 }
2500
2501 if (splitterDimSizes[splitDim] % numSplits != 0)
2502 {
2503 throw ParseException("Number of splits must evenly divide the dimension");
2504 }
2505 splitterDimSizes[splitDim] /= numSplits;
2506
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002507 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002508 for (unsigned int j = 0; j < numSplits; ++j)
2509 {
2510 // Set the size of the views.
2511 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2512 {
2513 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2514 }
2515 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2516 }
2517
2518 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2519 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2520
2521 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002522 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002523
Nina Drozd0324f482019-04-08 10:52:10 +01002524 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2525 {
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01002526 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
2527 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01002528 }
2529
2530 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2531 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2532}
2533
Sadik Armagan58f39192018-09-17 14:14:39 +01002534armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2535 unsigned int outputSlot,
2536 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002537{
2538 ActivationDescriptor activationDesc;
2539 std::string layerName = prevLayer->GetName();
2540
2541 switch(activationType)
2542 {
2543 case tflite::ActivationFunctionType_NONE:
2544 {
2545 // this is a no-op: return previous layer
2546 return prevLayer;
2547 }
2548 case tflite::ActivationFunctionType_RELU:
2549 {
2550 activationDesc.m_Function = ActivationFunction::ReLu;
2551 layerName += ":RELU";
2552 break;
2553 }
2554 case tflite::ActivationFunctionType_RELU6:
2555 {
2556 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2557 activationDesc.m_A = 6.0f;
2558 activationDesc.m_B = 0.0f;
2559 layerName += ":RELU6";
2560 break;
2561 }
2562 case tflite::ActivationFunctionType_TANH:
2563 {
2564 activationDesc.m_Function = ActivationFunction::TanH;
2565 activationDesc.m_A = 1.0f;
2566 activationDesc.m_B = 1.0f;
2567 layerName += ":TANH";
2568 break;
2569 }
2570
2571 // I only put these here as a reminder what others we could support
2572 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2573 case tflite::ActivationFunctionType_SIGN_BIT:
2574 default:
2575 {
2576 throw ParseException(
2577 boost::str(
2578 boost::format("TfLite parser doesn't suppport fused activation: "
2579 "%1%/%2% %3% ") %
2580 activationType %
2581 tflite::EnumNameActivationFunctionType(activationType) %
2582 CHECK_LOCATION().AsString()));
2583
2584 }
2585 }
2586
2587 IConnectableLayer* activationLayer =
2588 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2589
2590 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2591 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2592 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2593 return activationLayer;
2594}
2595
2596TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2597{
2598 if (fileName == nullptr)
2599 {
2600 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2601 CHECK_LOCATION().AsString()));
2602 }
2603 boost::system::error_code errorCode;
2604 boost::filesystem::path pathToFile(fileName);
2605 if (!boost::filesystem::exists(pathToFile, errorCode))
2606 {
2607 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2608 fileName %
2609 errorCode %
2610 CHECK_LOCATION().AsString()));
2611 }
2612 std::ifstream file(fileName, std::ios::binary);
2613 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2614 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2615 fileContent.size());
2616}
2617
2618TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2619{
2620 if (binaryContent == nullptr)
2621 {
2622 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2623 CHECK_LOCATION().AsString()));
2624 }
2625 flatbuffers::Verifier verifier(binaryContent, len);
2626 if (verifier.VerifyBuffer<tflite::Model>() == false)
2627 {
2628 throw ParseException(
2629 boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2630 "flatbuffers format. size:%1% %2%") %
2631 len %
2632 CHECK_LOCATION().AsString()));
2633 }
2634 return tflite::UnPackModel(binaryContent);
2635}
2636
2637TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2638 size_t subgraphIndex,
2639 size_t operatorIndex)
2640{
2641 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2642
Derek Lambertiff05cc52019-04-26 13:05:17 +01002643 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2644 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002645
2646 size_t inputCount = operatorPtr->inputs.size();
2647 TensorRawPtrVector result(inputCount);
2648 for (size_t i=0; i<inputCount; ++i)
2649 {
2650 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002651 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002652 }
2653 return result;
2654}
2655
2656TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2657 size_t subgraphIndex,
2658 size_t operatorIndex)
2659{
2660 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2661
Derek Lambertiff05cc52019-04-26 13:05:17 +01002662 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2663 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002664
2665 size_t outputCount = operatorPtr->outputs.size();
2666 TensorRawPtrVector result(outputCount);
2667 for (size_t i=0; i<outputCount; ++i)
2668 {
2669 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2670 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002671 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002672 }
2673 return result;
2674}
2675
2676TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2677 size_t subgraphIndex)
2678{
2679 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002680 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002681
Derek Lambertiff05cc52019-04-26 13:05:17 +01002682 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002683 TensorIdRawPtrVector result(inputCount);
2684 for (size_t i=0; i<inputCount; ++i)
2685 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002686 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002687 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002688 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002689 }
2690 return result;
2691}
2692
2693TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2694 size_t subgraphIndex)
2695{
2696 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002697 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002698
Derek Lambertiff05cc52019-04-26 13:05:17 +01002699 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002700 TensorIdRawPtrVector result(outputCount);
2701 for (size_t i=0; i<outputCount; ++i)
2702 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002703 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2704 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002705 }
2706 return result;
2707}
2708
2709std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2710 size_t subgraphIndex,
2711 size_t operatorIndex)
2712{
2713 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002714 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2715 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002716 return operatorPtr->inputs;
2717}
2718
2719std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2720 size_t subgraphIndex,
2721 size_t operatorIndex)
2722{
2723 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002724 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2725 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002726 return operatorPtr->outputs;
2727}
2728
2729void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2730 size_t operatorIndex,
2731 IConnectableLayer* layer,
2732 const std::vector<unsigned int>& tensorIndexes)
2733{
2734 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2735 BOOST_ASSERT(layer != nullptr);
2736 if (tensorIndexes.size() != layer->GetNumInputSlots())
2737 {
2738 throw ParseException(
2739 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2740 " for subgraph:%3% operator index:%4% %5%") %
2741 tensorIndexes.size() %
2742 layer->GetNumInputSlots() %
2743 subgraphIndex %
2744 operatorIndex %
2745 CHECK_LOCATION().AsString()));
2746 }
2747
2748 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2749 {
2750 unsigned int tensorIndex = tensorIndexes[slotIndex];
2751 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2752 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2753 }
2754}
2755
2756void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2757 size_t operatorIndex,
2758 IConnectableLayer* layer,
2759 const std::vector<unsigned int>& tensorIndexes)
2760{
2761 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2762 BOOST_ASSERT(layer != nullptr);
2763 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2764 {
2765 throw ParseException(
2766 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2767 " for subgraph:%3% operator index:%4% %5%") %
2768 tensorIndexes.size() %
2769 layer->GetNumOutputSlots() %
2770 subgraphIndex %
2771 operatorIndex %
2772 CHECK_LOCATION().AsString()));
2773 }
2774
2775 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2776 {
2777 unsigned int tensorIndex = tensorIndexes[slotIndex];
2778 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2779 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2780 }
2781}
2782
2783void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2784{
2785 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2786
2787 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2788 for (auto const & tensorIdAndPtr : inputs)
2789 {
2790 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2791 IConnectableLayer* layer =
2792 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2793
2794 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2795 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2796
2797 RegisterOutputSlots(subgraphIndex,
2798 VIRTUAL_OPERATOR_ID,
2799 layer,
2800 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2801 }
2802}
2803
2804void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2805{
2806 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2807
2808 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2809 for (auto const & tensorIdAndPtr : outputs)
2810 {
2811 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2812 IConnectableLayer* layer =
2813 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2814
2815 RegisterInputSlots(subgraphIndex,
2816 VIRTUAL_OPERATOR_ID,
2817 layer,
2818 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2819 }
2820}
2821
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002822void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2823{
2824 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2825
Derek Lambertiff05cc52019-04-26 13:05:17 +01002826 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002827 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2828 {
2829 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2830 {
2831 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2832 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2833 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002834 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002835 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2836 auto tensorAndData = CreateConstTensor(tensorPtr,
2837 tensorInfo,
2838 armnn::Optional<armnn::PermutationVector&>());
2839
2840 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2841 IConnectableLayer *layer =
2842 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2843
2844 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2845 RegisterOutputSlots(subgraphIndex,
2846 VIRTUAL_OPERATOR_ID,
2847 layer,
2848 { tensorIndex });
2849
2850 }
2851 }
2852 }
2853}
2854
telsoa01c577f2c2018-08-31 09:22:23 +01002855// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2856TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2857{
2858 CHECK_BUFFER(model, bufferIndex);
2859 return model->buffers[bufferIndex].get();
2860}
2861
Matteo Martincigh747ef822018-12-18 09:26:39 +00002862template<typename T>
2863std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2864TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2865 TfLiteParser::TensorRawPtr tensorPtr,
2866 armnn::TensorInfo& tensorInfo,
2867 armnn::Optional<armnn::PermutationVector&> permutationVector)
2868{
2869 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2870 tensorPtr,
2871 tensorInfo,
2872 permutationVector);
2873 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2874 return std::make_pair(constData.first, std::move(storage));
2875}
2876
telsoa01c577f2c2018-08-31 09:22:23 +01002877std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2878TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002879 armnn::TensorInfo& tensorInfo,
2880 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002881{
2882 CHECK_TENSOR_PTR(tensorPtr);
2883 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2884 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2885
2886 switch (tensorInfo.GetDataType())
2887 {
2888 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002889 return CreateConstTensorAndStoreData<float>(bufferPtr,
2890 tensorPtr,
2891 tensorInfo,
2892 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00002893 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002894 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2895 tensorPtr,
2896 tensorInfo,
2897 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00002898 case armnn::DataType::QSymmS8:
2899 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2900 tensorPtr,
2901 tensorInfo,
2902 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002903 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002904 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2905 tensorPtr,
2906 tensorInfo,
2907 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002908 default:
2909 {
2910 std::stringstream errString;
2911 errString << "Unexpected datatype when creating const tensor: "
2912 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2913 << " shape:" << tensorInfo.GetShape()
2914 << CHECK_LOCATION().AsString();
2915 throw ParseException(errString.str());
2916 }
2917 }
2918}
2919
2920BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2921 const std::string& name) const
2922{
2923 CHECK_SUBGRAPH(m_Model, subgraphId);
2924 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2925 for (auto const & input : inputs)
2926 {
2927 if (input.second->name == name)
2928 {
2929 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2930 return std::make_pair(bindingId, ToTensorInfo(input.second));
2931 }
2932 }
2933
2934 std::stringstream bindings;
2935 for (auto const & input : inputs)
2936 {
2937 bindings << "'" << input.second->name << "' ";
2938 }
2939
2940 throw ParseException(
2941 boost::str(
2942 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2943 "Possible inputs are: [%3%] %4%") %
2944 subgraphId %
2945 name %
2946 bindings.str() %
2947 CHECK_LOCATION().AsString()));
2948}
2949
2950BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2951 const std::string& name) const
2952{
2953 CHECK_SUBGRAPH(m_Model, subgraphId);
2954 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002955 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002956 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002957 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002958 if (output.second->name == name)
2959 {
2960 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002961 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2962 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2963 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002964 }
2965 }
2966
2967 std::stringstream bindings;
2968 for (auto const & output : outputs)
2969 {
2970 bindings << "'" << output.second->name << "' ";
2971 }
2972
2973 throw ParseException(
2974 boost::str(
2975 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2976 "Possible outputs are: [%3%] %4%") %
2977 subgraphId %
2978 name %
2979 bindings.str() %
2980 CHECK_LOCATION().AsString()));
2981}
2982
2983size_t TfLiteParser::GetSubgraphCount() const
2984{
2985 return m_Model->subgraphs.size();
2986}
2987
2988std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2989{
2990 CHECK_SUBGRAPH(m_Model, subgraphId);
2991 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2992 std::vector<std::string> result;
2993 result.reserve(inputs.size());
2994 for (auto const & input : inputs)
2995 {
2996 result.push_back(input.second->name);
2997 }
2998 return result;
2999}
3000
3001std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
3002{
3003 CHECK_SUBGRAPH(m_Model, subgraphId);
3004 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3005 std::vector<std::string> result;
3006 result.reserve(outputs.size());
3007 for (auto const & output : outputs)
3008 {
3009 result.push_back(output.second->name);
3010 }
3011 return result;
3012}
3013
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003014ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003015{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003016 return new TfLiteParser(options);
telsoa01c577f2c2018-08-31 09:22:23 +01003017}
3018
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003019ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003020{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003021 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
telsoa01c577f2c2018-08-31 09:22:23 +01003022}
3023
3024void ITfLiteParser::Destroy(ITfLiteParser* parser)
3025{
3026 delete parser;
3027}
3028
3029TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
3030: m_FloatData(std::move(data))
3031, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003032, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003033, m_Int32Data(nullptr)
3034{
3035}
3036
3037TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
3038: m_FloatData(nullptr)
3039, m_Uint8Data(std::move(data))
Keith Davisd305e1a2020-01-22 11:57:54 +00003040, m_Int8Data(nullptr)
3041, m_Int32Data(nullptr)
3042{
3043}
3044
3045TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
3046: m_FloatData(nullptr)
3047, m_Uint8Data(nullptr)
3048, m_Int8Data(std::move(data))
telsoa01c577f2c2018-08-31 09:22:23 +01003049, m_Int32Data(nullptr)
3050{
3051}
3052
3053TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
3054: m_FloatData(nullptr)
3055, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003056, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003057, m_Int32Data(std::move(data))
3058{
3059}
3060
3061} // armnnTfLiteParser