//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TfLiteParser.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>

// armnnUtils:
#include <armnnUtils/Permute.hpp>

#include <ParserHelper.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <flatbuffers/flexbuffers.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>

#include <fstream>
#include <algorithm>
#include <limits>
#include <numeric>

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{
namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                location.m_Function %
                subgraphIndex %
                location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParser::ModelPtr & model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid operator index. "
                              "subgraph:%2% operator:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                operatorIndex %
                location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // Not checking the model here, because CHECK_MODEL is assumed to have run already
    // and validated it; an assert is sufficient.
    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // The subgraph index should likewise have been checked by CHECK_MODEL,
    // so only an assert is added here.
    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // the tensor index is the only one to check here
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                location.m_Function %
                subgraphIndex %
                tensorIndex %
                location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
                    const CheckLocation & location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with a null tensor pointer. "
                              "at %2%") %
                location.m_Function %
                location.FileLine()));

    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParser::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "buffer:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid buffer index. "
                              "buffer index:%2% at %3%") %
                location.m_Function %
                bufferIndex %
                location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("The buffer #%1% is null. %2%") %
                bufferIndex %
                location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                bufferId %
                location.AsString()));
    }
    else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
            tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                    OPTION->fused_activation_function % \
                    tflite::EnumNameActivationFunctionType(\
                        OPTION->fused_activation_function) % \
                    __func__ % \
                    SUBGRAPH_INDEX % \
                    OPERATOR_INDEX % \
                    CHECK_LOCATION().FileLine())); \
        } \
    } while(false)


std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto & i : in)
    {
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}

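// Computes TfLite-style padding for one spatial dimension.
// Illustrative example (values not taken from the original source): inputSize=224, filterSize=3,
// stride=2, dilation=1 with Padding_SAME gives outputSize=(224+2-1)/2=112, dilatedSize=3 and
// temp=(112-1)*2+3=225, so paddingFront=(225-224)/2=0 and paddingBack=1.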
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}

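// Builds an armnn::TensorInfo from a TfLite tensor. A single quantization scale produces a
// per-tensor quantized (or unquantized) TensorInfo; multiple scales produce a per-channel
// (per-axis) TensorInfo whose quantization dimension is quantized_dimension remapped through
// dimensionMappings.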
armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes,
                               const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QAsymmU8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT8:
            if (tensorPtr->quantization->zero_point.size() == 1)
            {
                // Per-tensor
                type = armnn::DataType::QAsymmS8;
            }
            else
            {
                // Per-channel
                type = armnn::DataType::QSymmS8;
            }
            break;
        case tflite::TensorType_INT16:
            type = armnn::DataType::QSymmS16;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                    tensorPtr->type %
                    tflite::EnumNameTensorType(tensorPtr->type) %
                    tensorPtr->name %
                    location.AsString()));
        }
    }
    std::vector<unsigned int> safeShape = shapes;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        if (tensorPtr->quantization->scale.size() <= 1)
        {
            CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
            CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

            if (tensorPtr->quantization->scale.size() == 1)
            {
                quantizationScale = tensorPtr->quantization->scale[0];
            }
            if (tensorPtr->quantization->zero_point.size() == 1)
            {
                // NOTE: we lose precision here when converting from 64 bit to 32,
                //       but this is what we support at the moment in ArmNN
                quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
            }

            armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
                                     safeShape.data(),
                                     type,
                                     quantizationScale,
                                     quantizationOffset);

            return result;
        }
        else
        {
            std::vector<float> quantizationScales;
            std::vector<int32_t> quantizationOffsets;

            // Scale
            std::copy(tensorPtr->quantization->scale.begin(),
                      tensorPtr->quantization->scale.end(),
                      std::back_inserter(quantizationScales));

            // QSymmS8 Per-axis
            armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
                                     safeShape.data(),
                                     type,
                                     quantizationScales,
                                     dimensionMappings[boost::numeric_cast<unsigned int>(
                                         tensorPtr->quantization->quantized_dimension)]);
            return result;
        }
    }
    else
    {
        armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
                                 safeShape.data(),
                                 type,
                                 quantizationScale,
                                 quantizationOffset);
        return result;
    }
}

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
                               const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
}

template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    boost::ignore_unused(tensorPtr);
    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    BOOST_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

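// Illustrative example of the scheme below: tensor 5 in subgraph 0 maps to binding id
// (5 << 8) + 0 = 0x500 (1280).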
armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // generate the binding id by shifting the tensor id by 8 bits
    // and adding the subgraph id, which allows 256 subgraphs
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

} // <anonymous>

TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
}

void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}

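// Inserts a Reshape in front of the lower-rank input of a broadcastable binary operator so both
// inputs end up with the same number of dimensions. Illustrative example: broadcasting a [4]
// tensor against a [1, 2, 2, 4] tensor reshapes the [4] input to [1, 1, 1, 4].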
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    BOOST_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    BOOST_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(1));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}

INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
    m_Network = INetwork::Create();
    BOOST_ASSERT(m_Model.get() != nullptr);

    bool failedToCreate = false;
    std::stringstream errors;

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                m_Model->subgraphs.size() %
                CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    for (SubgraphPtr const & subgraph : m_Model->subgraphs)
    {
        m_SubgraphConnections.emplace_back(subgraph->tensors.size());

        size_t operatorIndex = 0;
        for (OperatorPtr const & op : subgraph->operators)
        {
            try
            {
                auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(
                        boost::str(
                            boost::format("Operator code %1% is out of range 0-%2%. "
                                          "subgraph:%3% operator idx:%4%. %5%") %
                            builtinCode %
                            tflite::BuiltinOperator_MAX %
                            subgraphIndex %
                            operatorIndex %
                            CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto & parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
            }
            catch (const ParseException& e)
            {
                failedToCreate = true;
                std::stringstream errorString;

                errorString << "Failed to parse operator #" << operatorIndex
                            << " within subgraph #" << subgraphIndex
                            << " error: " << e.what();
                ARMNN_LOG(error) << errorString.str();

                errors << errorString.str() << "\n";
            }
            ++operatorIndex;
        }

        SetupInputLayers(subgraphIndex);
        SetupOutputLayers(subgraphIndex);
        SetupConstantLayers(subgraphIndex);

        ++subgraphIndex;
    }

    if (failedToCreate)
    {
        // we can skip everything and let the outer exception handler deal with the error
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
            boost::format("Another layer has already registered itself as the producer of "
                          "subgraph:%1% tensor:%2% %3%") %
            subgraphIndex %
            tensorIndex %
            CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    // NOTE: By default we presume the custom operator is not supported
    auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;

    // Identify custom code defined for custom operator
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;

    // Find parser function that corresponds to custom code (if any)
    auto iterator = m_CustomParserFunctions.find(customCode);
    if (iterator != m_CustomParserFunctions.end())
    {
        customParserFunction = iterator->second;
    }

    // Run parser function
    (this->*customParserFunction)(subgraphIndex, operatorIndex);
}

void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
            boost::str(
                boost::format("Operator not supported. "
                              "subgraph:%1% operator:%2% "
                              "opcode_index:%3% opcode:%4% / %5% %6%") %
                subgraphIndex %
                operatorIndex %
                opcodeIndex %
                opcode %
                tflite::EnumNameBuiltinOperator(opcode) %
                CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());

    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
    }

    auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}

void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1], permutationVector);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
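    // e.g. (illustrative): a TfLite filter of shape [1, 3, 3, 64] over 32 input channels
    // becomes [3, 3, 32, 2], i.e. a depth multiplier M of 2.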
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);

    IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
    BOOST_ASSERT(layer != nullptr);

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}

void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1, 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);

    TransposeDescriptor desc;

    if (inputs.size() == 2)
    {
        armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
        BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
        auto numPermVecElements = permuteTensorInfo.GetNumElements();
        std::vector<unsigned int> permuteShape(numPermVecElements);
        ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
        PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());

        desc = TransposeDescriptor(permutationVector);
    }

    layer = m_Network->AddTransposeLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);

    layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                      filterTensorAndData.first,
                                                      EmptyOptional(),
                                                      layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}

void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());

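    // The crops data is laid out as consecutive (cropStart, cropEnd) pairs, one pair per
    // spatial dimension, which is why it is read in steps of two below.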
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }

    armnn::BatchToSpaceNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_Crops = crops;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());

    BOOST_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}

void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);

    auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
    IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
    {
        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
    }
    else
    {
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParsePool(size_t subgraphIndex,
                             size_t operatorIndex,
                             PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
            break;
        default:
            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
1298 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001299 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001300
1301 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1302 CHECK_VALID_SIZE(inputs.size(), 1);
1303 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1304
1305 // assuming input is NHWC
1306 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1307 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1308
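    // CalcPadding is expected to turn the TfLite padding mode into explicit values: VALID adds no
    // padding, while SAME pads so that the output extent is ceil(input / stride). As a sketch,
    // input width 224 with pool width 3 and stride 2 gives output width 112 and a total padding
    // of 1, split as padLeft 0 / padRight 1.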
Pablo Tellof0bd6832019-04-26 17:58:13 +01001309 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1310 desc.m_PadTop, desc.m_PadBottom, options->padding);
1311 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1312 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001313
1314 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1315 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001316
1317 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1318
1319 BOOST_ASSERT(layer != nullptr);
1320
jimfly01c25411c2018-11-14 17:47:22 +00001321 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1322 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001323
1324 // register the input connection slots for the layer, connections are made after all layers have been created
1325 // only the tensors for the inputs are relevant, exclude the const tensors
1326 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001327 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001328
jimfly01c25411c2018-11-14 17:47:22 +00001329 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001330 // register the output connection slots for the layer, connections are made after all layers have been created
1331 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1332 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1333}
1334
josh minorba424d22019-11-13 10:55:17 -06001335void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
1336{
1337 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1338
1339 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1340 CHECK_VALID_SIZE(inputs.size(), 3);
1341 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1342 CHECK_VALID_SIZE(outputs.size(), 1);
1343
1344 SliceDescriptor desc;
1345
1346 // set begin tensor info for slice descriptor
1347 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1348 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1349
1350 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1351 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1352
1353 // set size tensor info for slice descriptor
1354 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1355 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1356
1357 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1358 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1359 desc = SliceDescriptor(begin, size);
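    // Illustrative example: for an input of shape [3, 2, 3] with begin = {1, 0, 0} and
    // size = {1, 2, 3}, the slice keeps one entry along the first axis starting at index 1,
    // producing an output of shape [1, 2, 3].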
1360
1361 auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
1362 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
1363
1364 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1365 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1366
1367 // register the input connection slots for the layer, connections are made after all layers have been created
1368 // only the tensors for the inputs are relevant, exclude the const tensors
1369 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1370 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1371
1372 // register the output connection slots for the layer, connections are made after all layers have been created
1373 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1374 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1375}
1376
telsoa01c577f2c2018-08-31 09:22:23 +01001377void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1378{
1379 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1380 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1381 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1382
1383 SoftmaxDescriptor desc;
1384 desc.m_Beta = options->beta;
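    // Beta scales the logits before normalisation, i.e. softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j);
    // TfLite models typically export beta = 1.0.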
1385
1386 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1387 CHECK_VALID_SIZE(inputs.size(), 1);
1388 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1389 CHECK_VALID_SIZE(outputs.size(), 1);
1390
1391 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1392 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1393
1394 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1395 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1396
1397 // register the input connection slots for the layer, connections are made after all layers have been created
1398 // only the tensors for the inputs are relevant, exclude the const tensors
1399 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1400 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1401
1402 // register the output connection slots for the layer, connections are made after all layers have been created
1403 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1404 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1405}
1406
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001407void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1408{
1409 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1410
1411 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1412 CHECK_VALID_SIZE(inputs.size(), 3);
1413
1414 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1415 CHECK_VALID_SIZE(outputs.size(), 1);
1416
1417 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1418 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1419
1420 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1421 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1422
1423 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1424 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1425
1426 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1427 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1428
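    // The flat TfLite pad list is laid out as [before_0, after_0, before_1, after_1, ...];
    // consecutive entries are folded into (before, after) pairs, e.g. {1, 2, 3, 4} becomes
    // {(1, 2), (3, 4)} for a 2-D spatial block.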
1429 size_t step = 2;
1430 std::vector<std::pair<unsigned int, unsigned int>> padList;
1431 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1432 {
1433 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1434 }
1435
1436 armnn::SpaceToBatchNdDescriptor desc;
1437 desc.m_BlockShape = blockShape;
1438 desc.m_PadList = padList;
1439 desc.m_DataLayout = armnn::DataLayout::NHWC;
1440
1441 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1442
1443 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1444 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1445
1446 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1447
1448 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1449 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1450
1451 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1452 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1453}
1454
telsoa01c577f2c2018-08-31 09:22:23 +01001455armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1456 const armnn::TensorInfo & inputTensorInfo)
1457{
1458 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1459 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1460 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1461
1462 if (inputTensorInfo.GetNumDimensions() > 4)
1463 {
1464 std::stringstream ss;
1465 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1466 << " shape:" << inputTensorInfo.GetShape() << " "
1467 << CHECK_LOCATION().AsString();
1468 throw ParseException(ss.str());
1469 }
1470
1471 if (squeezeDims.empty())
1472 {
1473 squeezeDims.assign(dimensionSequence,
1474 dimensionSequence+inputTensorInfo.GetNumDimensions());
1475 }
1476
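    // Illustrative example: an input of shape [1, 2, 1, 3] with an empty squeeze list drops every
    // size-1 dimension and yields [2, 3]; with squeezeDims = {0} only the leading dimension is
    // removed, giving [2, 1, 3].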
1477 std::vector<uint32_t> outputDims;
1478 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1479 {
1480 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1481 auto currentDimension = inputTensorInfo.GetShape()[i];
1482 if (skipSqueeze || currentDimension != 1)
1483 {
1484 outputDims.push_back(currentDimension);
1485 }
1486 }
1487
1488 if (outputDims.size() > 4)
1489 {
1490 std::stringstream ss;
1491 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1492 << " shape:" << inputTensorInfo.GetShape() << " "
1493 << CHECK_LOCATION().AsString();
1494 throw ParseException(ss.str());
1495 }
1496
1497 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1498 outputDims.data());
1499
1500 // we need to preserve the tensor type and the quantization data as well
1501 TensorInfo outTensorInfo = inputTensorInfo;
1502 outTensorInfo.SetShape(outShape);
1503
1504 return outTensorInfo;
1505}
1506
1507void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1508{
1509 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1510
1511 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1512 CHECK_VALID_SIZE(inputs.size(), 1);
1513
1514 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1515 CHECK_VALID_SIZE(outputs.size(), 1);
1516
1517 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1518 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
1519
1520 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1521 armnn::TensorInfo outputTensorInfo =
1522 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1523 inputTensorInfo);
1524
1525 ReshapeDescriptor reshapeDesc;
1526 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1527
1528 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1529 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1530 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1531
1532 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1533 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1534
1535 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1536 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1537}
1538
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001539void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1540{
1541 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1542
1543 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1544 CHECK_VALID_SIZE(inputs.size(), 4);
1545
1546 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1547 CHECK_VALID_SIZE(outputs.size(), 1);
1548
1549 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1550 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1551
1552 StridedSliceDescriptor desc;
1553 desc.m_BeginMask = options->begin_mask;
1554 desc.m_EllipsisMask = options->ellipsis_mask;
1555 desc.m_EndMask = options->end_mask;
1556 desc.m_NewAxisMask = options->new_axis_mask;
1557 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1558 desc.m_DataLayout = armnn::DataLayout::NHWC;
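    // The masks follow TfLite semantics: a set bit i in begin_mask/end_mask means begin[i]/end[i]
    // is ignored and the full range is used on that axis, while a set bit in shrink_axis_mask
    // removes that axis from the output (e.g. begin_mask = 0b010 ignores begin[1]).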
1559
1560 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1561 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1562
1563 std::vector<int> begin(beginTensorInfo.GetNumElements());
1564 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1565
1566 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1567 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1568
1569 std::vector<int> end(endTensorInfo.GetNumElements());
1570 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1571
1572 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1573 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1574
1575 std::vector<int> stride(strideTensorInfo.GetNumElements());
1576 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1577
1578 desc.m_Begin = begin;
1579 desc.m_End = end;
1580 desc.m_Stride = stride;
1581
1582 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1583 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1584
1585 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1586 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1587
1588 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1589 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1590
1591 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1592 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1593}
1594
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001595void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1596{
1597 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1598
1599 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1600 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1601
1602 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1603 CHECK_VALID_SIZE(inputs.size(), 2);
1604
1605 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1606 CHECK_VALID_SIZE(outputs.size(), 1);
1607
1608 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1609 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1610
1611 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1612 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
1613
1614 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1615 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1616
1617 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
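    // When the ranks differ, AddBroadcastReshapeLayer is expected to reshape the lower-rank input
    // (for example [4] against [2, 3, 4]) so both inputs present the same number of dimensions
    // before the element-wise layer; otherwise both inputs connect directly.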
1618 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1619 {
1620 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1621 }
1622 else
1623 {
1624 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1625 }
1626
1627 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1628
1629 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1630 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1631}
1632
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001633void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1634{
1635 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1636
1637 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1638 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1639
1640 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1641 CHECK_VALID_SIZE(inputs.size(), 2);
1642
1643 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1644 CHECK_VALID_SIZE(outputs.size(), 1);
1645
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001646 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1647 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1648
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001649 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1650 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
1651
1652 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1653 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1654
1655 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001656 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1657 {
1658 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1659 }
1660 else
1661 {
1662 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1663 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001664
1665 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1666
1667 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1668 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1669}
1670
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001671void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1672{
1673 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1674
1675 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1676 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1677
1678 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1679 CHECK_VALID_SIZE(inputs.size(), 2);
1680
1681 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1682 CHECK_VALID_SIZE(outputs.size(), 1);
1683
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001684 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1685 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1686
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001687 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1688 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1689
1690 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1691 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1692
1693 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001694 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1695 {
1696 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1697 }
1698 else
1699 {
1700 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1701 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001702
1703 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1704
1705 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1706 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1707}
1708
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001709void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1710{
1711 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1712
1713 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1714
1715 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1716 CHECK_VALID_SIZE(outputs.size(), 1);
1717
1718 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1719 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1720
1721 armnn::MeanDescriptor desc;
1722 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1723 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1724 desc.m_Axis = axis;
1725
1726 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1727 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1728
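    // keep_dims is inferred from the ranks rather than read from the operator options: if the output
    // rank matches the input rank the reduced axes were kept as size 1 (e.g. [1, 2, 2, 1] -> [1, 1, 1, 1]),
    // otherwise they were dropped.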
1729 desc.m_KeepDims =
1730 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1732
1733 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1734 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
1735
1736 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1737
1738 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1739 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1740
1741 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1742 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1743}
1744
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001745void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1746{
1747 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1748
1749 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1750
1751 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1752 CHECK_VALID_SIZE(outputs.size(), 1);
1753
1754 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1755 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1756
1757 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1758 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1759
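    // The paddings tensor is flattened row-major as [before_0, after_0, before_1, after_1, ...];
    // e.g. for a 4-D NHWC input, {0, 0, 1, 1, 2, 2, 0, 0} pads one row top/bottom and two columns
    // left/right while leaving batch and channels untouched.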
1760 size_t step = 2;
1761 armnn::PadDescriptor desc;
1762 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1763 {
1764 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1765 }
1766
1767 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1768 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1769
1770 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1771 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1772
1773 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1774 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1775
1776 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1777 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1778}
1779
Sadik Armagan66dedc72019-12-10 16:32:07 +00001780void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
1781{
1782 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1783
1784 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1785 CHECK_VALID_SIZE(inputs.size(), 1);
1786
1787 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1788 CHECK_VALID_SIZE(outputs.size(), 1);
1789
1790 auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
1791
1792 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
1793 BOOST_ASSERT(layer != nullptr);
1794
1795 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1796 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1797
1798 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1799 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1800
1801 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1802 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1803}
Finn Williamsc42c3842019-01-22 14:18:11 +00001804
Sadik Armagan58f39192018-09-17 14:14:39 +01001805void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
1806{
Finn Williamsc42c3842019-01-22 14:18:11 +00001807 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001808}
1809
1810void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
1811{
Finn Williamsc42c3842019-01-22 14:18:11 +00001812 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1813}
Sadik Armagan58f39192018-09-17 14:14:39 +01001814
Finn Williamsc42c3842019-01-22 14:18:11 +00001815void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
1816{
1817 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1818}
1819
Nina Drozd99851762019-04-09 09:37:38 +01001820void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
1821{
1822 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
1823}
1824
Finn Williamsc42c3842019-01-22 14:18:11 +00001825
1826void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1827{
1828 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001829 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1830 boost::ignore_unused(operatorPtr);
1831
1832 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1833 CHECK_VALID_SIZE(inputs.size(), 1);
1834
1835 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1836 CHECK_VALID_SIZE(outputs.size(), 1);
1837
Finn Williamsc42c3842019-01-22 14:18:11 +00001838 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01001839 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001840 activationDesc.m_Function = activationType;
1841
1842 switch (activationType)
1843 {
1844 case ActivationFunction::ReLu:
1845 {
1846 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1847 break;
1848 }
1849 case ActivationFunction::BoundedReLu:
1850 {
1851 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
1852 activationDesc.m_A = 6.0f;
1853 activationDesc.m_B = 0.0f;
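            // BoundedReLu computes min(m_A, max(m_B, x)), so A = 6 and B = 0 reproduce TfLite's RELU6.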
1854 break;
1855 }
1856 case ActivationFunction::Sigmoid:
1857 {
1858 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1859 break;
1860 }
Nina Drozd99851762019-04-09 09:37:38 +01001861 case ActivationFunction::TanH:
1862 {
1863 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1864 activationDesc.m_A = 1.0f;
1865 activationDesc.m_B = 1.0f;
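            // ArmNN's TanH is parameterised as A * tanh(B * x); A = B = 1 gives the plain tanh used by TfLite.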
1866 break;
1867 }
Finn Williamsc42c3842019-01-22 14:18:11 +00001868 default:
1869 {
1870 throw ParseException(
1871 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
1872 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
1873 }
1874 }
1875
1876 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001877
1878 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1879 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1880
1881 // register the input connection slots for the layer, connections are made after all layers have been created
1882 // only the tensors for the inputs are relevant, exclude the const tensors
1883 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1884 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1885
1886 // register the output connection slots for the layer, connections are made after all layers have been created
1887 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1888 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1889}
Sadikb94967b2018-09-19 15:30:00 +01001890armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
1891 const std::vector<int32_t> & targetDimsIn)
1892{
1893 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1894 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1895
1896 if (stretchDim != targetDimsIn.end())
1897 {
1898 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1899 {
1900 throw ParseException(
1901 boost::str(
1902 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1903 }
1904
1905 auto targetNumElements =
1906 boost::numeric_cast<unsigned int>(
1907 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1908
1909 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1910 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1911 }
1912
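    // Example: an input holding 24 elements reshaped with new_shape = {-1, 6} resolves the stretch
    // dimension to 24 / 6 = 4, giving an output shape of [4, 6].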
1913 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1914
1915 TensorInfo reshapeInfo = inputTensorInfo;
1916 reshapeInfo.SetShape(outputShape);
1917
1918 return reshapeInfo;
1919}
1920
1921void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
1922{
1923 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1924
1925 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01001926
1927 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1928 CHECK_VALID_SIZE(outputs.size(), 1);
1929
1930 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1931 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
1932
1933 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00001934 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
1935 armnn::TensorInfo reshapeOutputTensorInfo =
Sadikb94967b2018-09-19 15:30:00 +01001936 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
1937
kevmay0171972a82018-12-17 14:28:03 +00001938 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001939 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
1940 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00001941 {
1942 std::stringstream ss;
1943 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00001944 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00001945 << " does not equal output shape "
1946 << actualOutputTensorInfo.GetShape()
1947 << ": "
1948 << CHECK_LOCATION().AsString();
1949 throw ParseException(ss.str());
1950 }
1951
Sadikb94967b2018-09-19 15:30:00 +01001952 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00001953 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01001954
1955 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1956 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
kevmay0171972a82018-12-17 14:28:03 +00001957 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01001958
1959 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1960 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1961
1962 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1963 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1964}
1965
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001966void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
1967{
Sadik Armagana3b31f02019-12-05 09:08:53 +00001968 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
1969}
1970
1971void TfLiteParser::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
1972{
1973 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
1974}
1975
1976void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
1977{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001978 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1979
1980 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1981 CHECK_VALID_SIZE(inputs.size(), 2);
1982
1983 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1984 CHECK_VALID_SIZE(outputs.size(), 1);
1985
1986 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
1987
1988 // Data for the parsed tensor args (size) must be stored locally.
1989 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
1990
1991 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1992 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1993
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001994 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00001995 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001996 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001997 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1998 desc.m_DataLayout = armnn::DataLayout::NHWC;
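    // The size tensor holds {new_height, new_width}; e.g. {224, 224} upsamples a 112x112 feature
    // map by a factor of two in both spatial dimensions.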
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02001999
Sadik Armagana3b31f02019-12-05 09:08:53 +00002000 auto layerName = str(boost::format("Resize:"));
2001
2002 switch (resizeMethod)
2003 {
2004 case ResizeMethod::Bilinear:
2005 {
2006 layerName += str(boost::format("BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002007
2008 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2009 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2010
2011 desc.m_BilinearAlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002012 break;
2013 }
2014 case ResizeMethod::NearestNeighbor:
2015 {
2016 layerName += str(boost::format("NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
2017 break;
2018 }
2019 default:
2020 {
2021 throw ParseException(
2022 boost::str(boost::format("Unexpected ResizeMethod[%1%] when creating layerName "
2023 " %2% ") %static_cast<int>(resizeMethod)% CHECK_LOCATION().AsString()));
2024 }
2025 }
2026
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002027 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002028
2029 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2030 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2031
2032 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2033 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2034
2035 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2036 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2037}
2038
Sadik Armagan479045b2018-10-01 11:51:37 +01002039void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
2040{
2041 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2042
2043 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2044 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
2045
2046 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2047
2048 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2049 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2050 CHECK_VALID_SIZE(outputs.size(), 1);
2051
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002052 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
2053 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01002054
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002055 const unsigned int concatDimInput = static_cast<unsigned int>(
2056 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
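    // Adding the rank before taking the modulus normalises a negative TfLite axis,
    // e.g. axis = -1 on rank-4 inputs maps to concatenation dimension 3.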
Sadik Armagan479045b2018-10-01 11:51:37 +01002057
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002058 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2059 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01002060
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002061 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01002062
2063 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2064 {
2065 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
2066
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002067 // This sets up the concatDescriptor view origins
2068 armnnUtils::ProcessConcatInputTensorInfo(
2069 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01002070 }
2071
2072 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Jim Flynn906f9462019-05-10 13:55:21 +01002073 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Sadik Armagan479045b2018-10-01 11:51:37 +01002074
2075 BOOST_ASSERT(layer != nullptr);
2076
2077 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2078 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Sadik Armagan479045b2018-10-01 11:51:37 +01002079
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002080 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01002081
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002082 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01002083
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002084 // add fused activation layer
2085 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01002086
Sadik Armagan479045b2018-10-01 11:51:37 +01002087 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2088 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2089}
2090
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002091void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
2092{
2093 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2094
2095 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2096 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2097
2098 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2099
2100 FullyConnectedDescriptor desc;
2101 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002102 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002103
2104 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2105 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2106 CHECK_VALID_SIZE(outputs.size(), 1);
2107
2108 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2109
2110 // Fully Connected Layer accepts two dimensional weights input
2111 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2112 if (weightsDimension != 2)
2113 {
2114 throw ParseException(
2115 boost::str(
2116 boost::format(
2117 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
2118 "Node %2%")
2119 % weightsDimension
2120 % CHECK_LOCATION().AsString()));
2121 }
2122
Matteo Martincigh747ef822018-12-18 09:26:39 +00002123 auto filterTensorAndData = CreateConstTensor(inputs[1],
2124 filterTensorInfo,
2125 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002126 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002127 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
2128
2129 if (inputs.size() == 3)
2130 {
2131 desc.m_BiasEnabled = true;
2132 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002133 auto biasTensorAndData = CreateConstTensor(inputs[2],
2134 biasTensorInfo,
2135 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002136 layer = m_Network->AddFullyConnectedLayer(desc,
2137 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002138 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002139 layerName.c_str());
2140 }
2141 else
2142 {
2143 layer = m_Network->AddFullyConnectedLayer(desc,
2144 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002145 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002146 layerName.c_str());
2147 }
2148 BOOST_ASSERT(layer != nullptr);
2149
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002150 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2151
2152 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2153
2154 if (inputTensorInfo.GetNumDimensions() > 2)
2155 {
2156 // Add reshape to flatten to 2D [batch_size, input_size],
2157 // where "input_size" corresponds to the number of inputs to the layer,
2158 // matching the second dimension of weights,
2159 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2160 std::vector<unsigned int> reshapedDimensions(2);
2161 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2162 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2163
2164 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2165 {
2166 throw ParseException(
2167 boost::str(
2168 boost::format(
2169 "Failed to deduce input tensor shape from filter size %1%")
2170 % reshapedDimensions[1]
2171 % CHECK_LOCATION().AsString()));
2172 }
2173
2174 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2175 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2176
2177 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2178 armnn::ReshapeDescriptor reshapeDesc;
2179 reshapeDesc.m_TargetShape = reshapedTensorInfo.GetShape();
2180 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());
2181
2182 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2183 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2184
2185 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2186 }
2187 else
2188 {
2189 // register the input connection slot for the layer
2190 // only the tensors for the inputs are relevant, exclude the const tensors
2191 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2192 }
2193
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002194 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2195 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2196
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002197 // we need to add the activation layer and fortunately we don't need to care about the data layout
2198 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2199 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002200
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002201 // register the output connection slots for the layer, connections are made after all layers have been created
2202 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2203 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2204}
2205
keidav011b3e2ea2019-02-21 10:07:37 +00002206void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
2207{
2208 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2209
2210 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2211
2212 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2213 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2214 CHECK_VALID_SIZE(outputs.size(), 4);
2215
2216 // Obtain custom options from flexbuffers
2217 auto custom_options = operatorPtr->custom_options;
2218 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
2219
2220 // Obtain descriptor information from tf lite
2221 DetectionPostProcessDescriptor desc;
2222 desc.m_MaxDetections = m["max_detections"].AsUInt32();
2223 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
2224 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
2225 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
2226 desc.m_NumClasses = m["num_classes"].AsUInt32();
2227 desc.m_ScaleH = m["h_scale"].AsFloat();
2228 desc.m_ScaleW = m["w_scale"].AsFloat();
2229 desc.m_ScaleX = m["x_scale"].AsFloat();
2230 desc.m_ScaleY = m["y_scale"].AsFloat();
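    // The four scale values decode the box encodings relative to the anchors; SSD models exported
    // from the TensorFlow Object Detection API typically use y_scale = x_scale = 10.0 and
    // h_scale = w_scale = 5.0.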
2231
keidav0107d58c72019-02-26 11:57:39 +00002232 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00002233 {
keidav0107d58c72019-02-26 11:57:39 +00002234 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00002235 }
2236 if (!(m["detections_per_class"].IsNull()))
2237 {
2238 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
2239 }
2240
2241 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
2242 {
2243 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
2244 "must be positive and less than or equal to 1.");
2245 }
2246
2247 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
2248 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
2249 armnn::Optional<armnn::PermutationVector&>());
2250
2251 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
2252 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
2253 layerName.c_str());
2254
2255 BOOST_ASSERT(layer != nullptr);
2256
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002257 // The model does not specify the output shapes.
2258 // The output shapes are calculated from the max_detection and max_classes_per_detection.
2259 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
2260 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2261 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2262 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2263 m_OverridenOutputShapes.push_back({ 1 });
2264
keidav011b3e2ea2019-02-21 10:07:37 +00002265 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
2266 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002267 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00002268 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
2269 }
2270
2271 // Register the input connection slots for the layer, connections are made after all layers have been created
2272 // only the tensors for the inputs are relevant, exclude the const tensors
2273 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2274 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2275
2276 // Register the output connection slots for the layer, connections are made after all layers have been created
2277 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2278 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2279 outputTensorIndexes[1],
2280 outputTensorIndexes[2],
2281 outputTensorIndexes[3]});
2282}
2283
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002284/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2285void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2286{
2287 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2288
2289 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2290 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2291 CHECK_VALID_SIZE(outputs.size(), 1);
2292
2293 if (inputs.size() < 1)
2294 {
2295 throw ParseException("Pack must have at least one input.");
2296 }
2297
2298 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2299 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2300
2301 StackDescriptor desc;
2302 desc.m_Axis = static_cast<uint32_t>(options->axis);
2303 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
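    // Stacking N inputs of shape [A, B] along axis 0 produces an output of shape [N, A, B];
    // along axis 1 it produces [A, N, B].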
2304
2305 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2306 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2307 desc.m_InputShape = inputTensorInfo.GetShape();
2308
2309 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2310 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2311
2312 BOOST_ASSERT(layer != nullptr);
2313
2314 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2315 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2316
2317 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2318 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2319
2320 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2321 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2322}
2323
Nina Drozd200e3802019-04-15 09:47:39 +01002324void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2325{
2326 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2327
2328 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2329 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2330
2331 // unpackAxis indicates the axis to unpack along
2332 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2333
2334 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2335 CHECK_VALID_SIZE(inputs.size(), 1);
2336
2337 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002338
2339 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2340 {
2341 throw ParseException(
2342 boost::str(
2343 boost::format(
2344 "The unpack axis: %1% cannot be greater than or equal to "
2345 "the number of input dimension %2% %3%")
2346 % unpackAxis
2347 % inputTensorInfo.GetNumDimensions()
2348 % CHECK_LOCATION().AsString()));
2349 }
2350
Nina Drozd200e3802019-04-15 09:47:39 +01002351 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2352 // If num is not defined, automatically infer from the length of the dimension axis.
2353 if(unpackNum == 0)
2354 {
2355 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2356 }
2357
2358 // If unpack number cannot be inferred and is still zero, throw ParseException.
2359 if(unpackNum == 0)
2360 {
2361 throw ParseException("Number to unpack must greater than zero.");
2362 }
2363
2364 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2365 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2366
2367 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2368 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2369
2370 // Add current input shape to unpackDimSizes
2371 for (unsigned int i = 0; i < inputDimSize; ++i)
2372 {
2373 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2374 }
2375
2376 if (unpackDimSizes[unpackAxis] != unpackNum)
2377 {
2378 throw ParseException("Number to unpack must be the same as length of the dimension to "
2379 "unpack along.");
2380 }
2381
2382 unpackDimSizes[unpackAxis] /= unpackNum;
2383
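    // Each splitter view keeps the full extent of every dimension except the unpack axis, whose size
    // becomes 1 and whose origin advances by one per output, e.g. unpacking [4, 3] along axis 0 gives
    // four views of shape [1, 3] that the reshapes below squeeze to [3].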
2384 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2385 for (unsigned int j = 0; j < unpackNum; ++j)
2386 {
2387 // Set the size of the views.
2388 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2389 {
2390 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2391 }
2392 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2393 }
2394
2395 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2396 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2397
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002398 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2399 unpackDimSizes.data());
2400
Nina Drozd200e3802019-04-15 09:47:39 +01002401 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2402 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2403
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002404 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2405 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2406 {
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002407 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002408 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2409 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002410 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002411 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
2412
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002413 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2414 outputTensorInfo.GetDataType(),
2415 outputTensorInfo.GetQuantizationScale(),
2416 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002417 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2418
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002419 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002420
2421 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2422 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2423 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2424 }
Nina Drozd200e3802019-04-15 09:47:39 +01002425}
2426
Nina Drozd0324f482019-04-08 10:52:10 +01002427void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2428{
2429 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2430
2431 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2432 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2433
2434 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2435
Nina Drozd200e3802019-04-15 09:47:39 +01002436    // If the number of splits cannot be inferred (num_splits is zero), throw a ParseException.
2437    if (numSplits == 0)
2438    {
2439        throw ParseException("Number of splits must be greater than zero.");
2440    }
2441
Nina Drozd0324f482019-04-08 10:52:10 +01002442 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2443 CHECK_VALID_SIZE(inputs.size(), 2);
2444 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2445 CHECK_VALID_SIZE(outputs.size(), numSplits);
2446
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002447 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2448 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002449
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002450 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2451 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2452 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2453
2454 BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
2455 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002456
2457    // Armnn only supports split along the channel dimension (1 for NCHW, 3 for NHWC), so dimensions 0 and 2 are rejected.
2458 if (splitDim == 0 || splitDim == 2)
2459 {
2460 throw ParseException(
2461 boost::str(
2462 boost::format(
2463 "Dimension %1% for split is not supported by Armnn. %2%")
2464 % splitDim
2465 % CHECK_LOCATION().AsString()));
2466 }
2467
2468 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002469 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002470 {
2471 throw ParseException(
2472 boost::str(
2473 boost::format(
2474 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002475 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002476 % inputTensorInfo.GetNumDimensions()
2477 % MaxNumOfTensorDimensions
2478 % CHECK_LOCATION().AsString()));
2479 }
2480
2481 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2482
2483 // Add current input shape to splitterDimSizes
2484 for (unsigned int i = 0; i < inputDimSize; ++i)
2485 {
2486 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2487 }
2488
2489 if (splitterDimSizes[splitDim] % numSplits != 0)
2490 {
2491        throw ParseException("Number of splits must evenly divide the size of the dimension to split along.");
2492 }
2493 splitterDimSizes[splitDim] /= numSplits;
2494
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002495 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002496 for (unsigned int j = 0; j < numSplits; ++j)
2497 {
2498 // Set the size of the views.
2499 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2500 {
2501 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2502 }
2503 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2504 }
2505
2506 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2507 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2508
2509 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002510 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002511
Nina Drozd0324f482019-04-08 10:52:10 +01002512 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2513 {
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01002514 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
2515 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01002516 }
2517
2518 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2519 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2520}
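
// Worked example (a sketch, not taken from any particular model): a SPLIT with num_splits = 2,
// an axis tensor holding 3 (the NHWC channel dimension) and a [1, 4, 2, 6] value tensor produces
// a SplitterDescriptor with two views of size [1, 4, 2, 3], whose origins sit at offsets 0 and 3
// along dimension 3; each view becomes one of the operator's outputs.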
2521
Sadik Armagan58f39192018-09-17 14:14:39 +01002522armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2523 unsigned int outputSlot,
2524 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002525{
2526 ActivationDescriptor activationDesc;
2527 std::string layerName = prevLayer->GetName();
2528
2529 switch(activationType)
2530 {
2531 case tflite::ActivationFunctionType_NONE:
2532 {
2533 // this is a no-op: return previous layer
2534 return prevLayer;
2535 }
2536 case tflite::ActivationFunctionType_RELU:
2537 {
2538 activationDesc.m_Function = ActivationFunction::ReLu;
2539 layerName += ":RELU";
2540 break;
2541 }
2542 case tflite::ActivationFunctionType_RELU6:
2543 {
2544 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2545 activationDesc.m_A = 6.0f;
2546 activationDesc.m_B = 0.0f;
2547 layerName += ":RELU6";
2548 break;
2549 }
2550 case tflite::ActivationFunctionType_TANH:
2551 {
2552 activationDesc.m_Function = ActivationFunction::TanH;
2553 activationDesc.m_A = 1.0f;
2554 activationDesc.m_B = 1.0f;
2555 layerName += ":TANH";
2556 break;
2557 }
2558
2559        // These cases are kept here as a reminder of the other fused activation types we could support.
2560 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2561 case tflite::ActivationFunctionType_SIGN_BIT:
2562 default:
2563 {
2564 throw ParseException(
2565 boost::str(
2566                    boost::format("TfLite parser doesn't support fused activation: "
2567 "%1%/%2% %3% ") %
2568 activationType %
2569 tflite::EnumNameActivationFunctionType(activationType) %
2570 CHECK_LOCATION().AsString()));
2571
2572 }
2573 }
2574
2575 IConnectableLayer* activationLayer =
2576 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2577
2578 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2579 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2580 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2581 return activationLayer;
2582}
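
// Example (a sketch): a convolution with a fused RELU6 ends up as the convolution layer followed
// by a separate Activation layer configured as BoundedReLu with m_A = 6.0f (upper bound) and
// m_B = 0.0f (lower bound); the caller registers the operator's outputs against the returned
// activation layer rather than the convolution itself.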
2583
2584TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2585{
2586 if (fileName == nullptr)
2587 {
2588 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2589 CHECK_LOCATION().AsString()));
2590 }
2591 boost::system::error_code errorCode;
2592 boost::filesystem::path pathToFile(fileName);
2593 if (!boost::filesystem::exists(pathToFile, errorCode))
2594 {
2595 throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2596 fileName %
2597 errorCode %
2598 CHECK_LOCATION().AsString()));
2599 }
2600 std::ifstream file(fileName, std::ios::binary);
2601 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2602 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2603 fileContent.size());
2604}
2605
2606TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2607{
2608 if (binaryContent == nullptr)
2609 {
2610 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2611 CHECK_LOCATION().AsString()));
2612 }
2613 flatbuffers::Verifier verifier(binaryContent, len);
2614 if (verifier.VerifyBuffer<tflite::Model>() == false)
2615 {
2616 throw ParseException(
2617            boost::str(boost::format("Buffer doesn't conform to the expected TensorFlow Lite "
2618 "flatbuffers format. size:%1% %2%") %
2619 len %
2620 CHECK_LOCATION().AsString()));
2621 }
2622 return tflite::UnPackModel(binaryContent);
2623}
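
// Example usage (a sketch; "model.tflite" is a placeholder path and 'buffer' a hypothetical
// std::vector<uint8_t> already holding the flatbuffer):
//   TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile("model.tflite");
//   TfLiteParser::ModelPtr sameModel = TfLiteParser::LoadModelFromBinary(buffer.data(), buffer.size());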
2624
2625TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2626 size_t subgraphIndex,
2627 size_t operatorIndex)
2628{
2629 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2630
Derek Lambertiff05cc52019-04-26 13:05:17 +01002631 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2632 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002633
2634 size_t inputCount = operatorPtr->inputs.size();
2635 TensorRawPtrVector result(inputCount);
2636 for (size_t i=0; i<inputCount; ++i)
2637 {
2638 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002639 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002640 }
2641 return result;
2642}
2643
2644TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2645 size_t subgraphIndex,
2646 size_t operatorIndex)
2647{
2648 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2649
Derek Lambertiff05cc52019-04-26 13:05:17 +01002650 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2651 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002652
2653 size_t outputCount = operatorPtr->outputs.size();
2654 TensorRawPtrVector result(outputCount);
2655 for (size_t i=0; i<outputCount; ++i)
2656 {
2657 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2658 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002659 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002660 }
2661 return result;
2662}
2663
2664TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
2665 size_t subgraphIndex)
2666{
2667 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002668 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002669
Derek Lambertiff05cc52019-04-26 13:05:17 +01002670 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002671 TensorIdRawPtrVector result(inputCount);
2672 for (size_t i=0; i<inputCount; ++i)
2673 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002674 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01002675 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002676 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002677 }
2678 return result;
2679}
2680
2681TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
2682 size_t subgraphIndex)
2683{
2684 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002685 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002686
Derek Lambertiff05cc52019-04-26 13:05:17 +01002687 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01002688 TensorIdRawPtrVector result(outputCount);
2689 for (size_t i=0; i<outputCount; ++i)
2690 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002691 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2692 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01002693 }
2694 return result;
2695}
2696
2697std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
2698 size_t subgraphIndex,
2699 size_t operatorIndex)
2700{
2701 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002702 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2703 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002704 return operatorPtr->inputs;
2705}
2706
2707std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
2708 size_t subgraphIndex,
2709 size_t operatorIndex)
2710{
2711 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002712 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2713 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002714 return operatorPtr->outputs;
2715}
2716
2717void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
2718 size_t operatorIndex,
2719 IConnectableLayer* layer,
2720 const std::vector<unsigned int>& tensorIndexes)
2721{
2722 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2723 BOOST_ASSERT(layer != nullptr);
2724 if (tensorIndexes.size() != layer->GetNumInputSlots())
2725 {
2726 throw ParseException(
2727 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
2728 " for subgraph:%3% operator index:%4% %5%") %
2729 tensorIndexes.size() %
2730 layer->GetNumInputSlots() %
2731 subgraphIndex %
2732 operatorIndex %
2733 CHECK_LOCATION().AsString()));
2734 }
2735
2736 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
2737 {
2738 unsigned int tensorIndex = tensorIndexes[slotIndex];
2739 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
2740 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2741 }
2742}
2743
2744void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
2745 size_t operatorIndex,
2746 IConnectableLayer* layer,
2747 const std::vector<unsigned int>& tensorIndexes)
2748{
2749 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2750 BOOST_ASSERT(layer != nullptr);
2751 if (tensorIndexes.size() != layer->GetNumOutputSlots())
2752 {
2753 throw ParseException(
2754 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
2755 " for subgraph:%3% operator index:%4% %5%") %
2756 tensorIndexes.size() %
2757 layer->GetNumOutputSlots() %
2758 subgraphIndex %
2759 operatorIndex %
2760 CHECK_LOCATION().AsString()));
2761 }
2762
2763 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
2764 {
2765 unsigned int tensorIndex = tensorIndexes[slotIndex];
2766 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
2767 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2768 }
2769}
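
// Note (inferred from how these helpers are used in this file, not a guarantee): RegisterInputSlots
// and RegisterOutputSlots do not connect anything immediately; they record the consumers and the
// producer of each TfLite tensor index in m_SubgraphConnections so that the actual slot connections
// can be made once both sides of a tensor are known.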
2770
2771void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
2772{
2773 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2774
2775 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
2776 for (auto const & tensorIdAndPtr : inputs)
2777 {
2778 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2779 IConnectableLayer* layer =
2780 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2781
2782 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
2783 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2784
2785 RegisterOutputSlots(subgraphIndex,
2786 VIRTUAL_OPERATOR_ID,
2787 layer,
2788 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2789 }
2790}
2791
2792void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
2793{
2794 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2795
2796 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
2797 for (auto const & tensorIdAndPtr : outputs)
2798 {
2799 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2800 IConnectableLayer* layer =
2801 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2802
2803 RegisterInputSlots(subgraphIndex,
2804 VIRTUAL_OPERATOR_ID,
2805 layer,
2806 { static_cast<uint32_t>(tensorIdAndPtr.first) });
2807 }
2808}
2809
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002810void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
2811{
2812 CHECK_SUBGRAPH(m_Model, subgraphIndex);
2813
Derek Lambertiff05cc52019-04-26 13:05:17 +01002814 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002815    // Add a ConstantLayer for every tensor in this subgraph that has consumers but no
2816    // producing layer, i.e. constant data stored in the model.
2817    for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2818    {
2819        if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
2820            m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2821        {
Derek Lambertiff05cc52019-04-26 13:05:17 +01002822            TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02002823            armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
2824            auto tensorAndData = CreateConstTensor(tensorPtr,
2825                                                   tensorInfo,
2826                                                   armnn::Optional<armnn::PermutationVector&>());
2827
2828            std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
2829            IConnectableLayer *layer =
2830                m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2831
2832            layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2833            RegisterOutputSlots(subgraphIndex,
2834                                VIRTUAL_OPERATOR_ID,
2835                                layer,
2836                                { tensorIndex });
2837        }
2838    }
2841}
2842
telsoa01c577f2c2018-08-31 09:22:23 +01002843// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2844TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
2845{
2846 CHECK_BUFFER(model, bufferIndex);
2847 return model->buffers[bufferIndex].get();
2848}
2849
Matteo Martincigh747ef822018-12-18 09:26:39 +00002850template<typename T>
2851std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2852TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
2853 TfLiteParser::TensorRawPtr tensorPtr,
2854 armnn::TensorInfo& tensorInfo,
2855 armnn::Optional<armnn::PermutationVector&> permutationVector)
2856{
2857 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2858 tensorPtr,
2859 tensorInfo,
2860 permutationVector);
2861 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2862 return std::make_pair(constData.first, std::move(storage));
2863}
2864
telsoa01c577f2c2018-08-31 09:22:23 +01002865std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2866TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00002867 armnn::TensorInfo& tensorInfo,
2868 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01002869{
2870 CHECK_TENSOR_PTR(tensorPtr);
2871 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
2872 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
2873
2874 switch (tensorInfo.GetDataType())
2875 {
2876 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002877 return CreateConstTensorAndStoreData<float>(bufferPtr,
2878 tensorPtr,
2879 tensorInfo,
2880 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00002881 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002882 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2883 tensorPtr,
2884 tensorInfo,
2885 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00002886 case armnn::DataType::QSymmS8:
2887 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2888 tensorPtr,
2889 tensorInfo,
2890 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00002891 case armnn::DataType::QAsymmS8:
2892 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2893 tensorPtr,
2894 tensorInfo,
2895 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002896 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00002897 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2898 tensorPtr,
2899 tensorInfo,
2900 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01002901 default:
2902 {
2903 std::stringstream errString;
2904 errString << "Unexpected datatype when creating const tensor: "
2905 << armnn::GetDataTypeName(tensorInfo.GetDataType())
2906 << " shape:" << tensorInfo.GetShape()
2907 << CHECK_LOCATION().AsString();
2908 throw ParseException(errString.str());
2909 }
2910 }
2911}
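
// Example (a sketch mirroring how this helper is used elsewhere in this parser, e.g. for
// convolution weights):
//   auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo,
//                                                armnn::Optional<armnn::PermutationVector&>());
// filterTensorAndData.first is the armnn::ConstTensor handed to the layer, while .second owns
// the backing (possibly permuted) copy of the buffer so the data stays valid while it is used.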
2912
2913BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
2914 const std::string& name) const
2915{
2916 CHECK_SUBGRAPH(m_Model, subgraphId);
2917 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2918 for (auto const & input : inputs)
2919 {
2920 if (input.second->name == name)
2921 {
2922 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2923 return std::make_pair(bindingId, ToTensorInfo(input.second));
2924 }
2925 }
2926
2927 std::stringstream bindings;
2928 for (auto const & input : inputs)
2929 {
2930 bindings << "'" << input.second->name << "' ";
2931 }
2932
2933 throw ParseException(
2934 boost::str(
2935 boost::format("No input binding found for subgraph:%1% and name:%2%. "
2936 "Possible inputs are: [%3%] %4%") %
2937 subgraphId %
2938 name %
2939 bindings.str() %
2940 CHECK_LOCATION().AsString()));
2941}
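
// Example usage (a sketch; the subgraph id 0 and tensor name "input" are placeholders):
//   BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, "input");
//   // inputBinding.first is the layer binding id used when building armnn::InputTensors,
//   // inputBinding.second is the armnn::TensorInfo describing the expected shape and data type.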
2942
2943BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
2944 const std::string& name) const
2945{
2946 CHECK_SUBGRAPH(m_Model, subgraphId);
2947 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002948 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01002949 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002950 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01002951 if (output.second->name == name)
2952 {
2953 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002954 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2955 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2956 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01002957 }
2958 }
2959
2960 std::stringstream bindings;
2961 for (auto const & output : outputs)
2962 {
2963 bindings << "'" << output.second->name << "' ";
2964 }
2965
2966 throw ParseException(
2967 boost::str(
2968 boost::format("No output binding found for subgraph:%1% and name:%2%. "
2969 "Possible outputs are: [%3%] %4%") %
2970 subgraphId %
2971 name %
2972 bindings.str() %
2973 CHECK_LOCATION().AsString()));
2974}
2975
2976size_t TfLiteParser::GetSubgraphCount() const
2977{
2978 return m_Model->subgraphs.size();
2979}
2980
2981std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
2982{
2983 CHECK_SUBGRAPH(m_Model, subgraphId);
2984 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2985 std::vector<std::string> result;
2986 result.reserve(inputs.size());
2987 for (auto const & input : inputs)
2988 {
2989 result.push_back(input.second->name);
2990 }
2991 return result;
2992}
2993
2994std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
2995{
2996 CHECK_SUBGRAPH(m_Model, subgraphId);
2997 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2998 std::vector<std::string> result;
2999 result.reserve(outputs.size());
3000 for (auto const & output : outputs)
3001 {
3002 result.push_back(output.second->name);
3003 }
3004 return result;
3005}
3006
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003007ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003008{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003009 return new TfLiteParser(options);
telsoa01c577f2c2018-08-31 09:22:23 +01003010}
3011
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003012ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003013{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003014 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
telsoa01c577f2c2018-08-31 09:22:23 +01003015}
3016
3017void ITfLiteParser::Destroy(ITfLiteParser* parser)
3018{
3019 delete parser;
3020}
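
// Example end-to-end use of the public interface (a sketch; "model.tflite", "input" and "output"
// are placeholders, and the options argument is assumed to have a default):
//   using namespace armnnTfLiteParser;
//   ITfLiteParserPtr parser = ITfLiteParser::Create();
//   armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
//   BindingPointInfo inputBinding  = parser->GetNetworkInputBindingInfo(0, "input");
//   BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(0, "output");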
3021
3022TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
3023: m_FloatData(std::move(data))
3024, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003025, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003026, m_Int32Data(nullptr)
3027{
3028}
3029
3030TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
3031: m_FloatData(nullptr)
3032, m_Uint8Data(std::move(data))
Keith Davisd305e1a2020-01-22 11:57:54 +00003033, m_Int8Data(nullptr)
3034, m_Int32Data(nullptr)
3035{
3036}
3037
3038TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
3039: m_FloatData(nullptr)
3040, m_Uint8Data(nullptr)
3041, m_Int8Data(std::move(data))
telsoa01c577f2c2018-08-31 09:22:23 +01003042, m_Int32Data(nullptr)
3043{
3044}
3045
3046TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
3047: m_FloatData(nullptr)
3048, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003049, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003050, m_Int32Data(std::move(data))
3051{
3052}
3053
3054} // armnnTfLiteParser