//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TfLiteParser.hpp"

#include <armnn/BackendOptions.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

// armnnUtils:
#include <armnnUtils/Permute.hpp>
#include <Filesystem.hpp>

#include <ParserHelper.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <flatbuffers/flexbuffers.h>

#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>

#include <fstream>
#include <algorithm>
#include <limits>
#include <numeric>
#include <sstream>

#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
          { \
            throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
               << ": " \
               << CHECK_LOCATION().AsString()).str()); \
          }

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{
namespace
{

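// Sentinel operator index used when a check is made outside the context of a real operator (see CheckModel).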
const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParser::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% at %3%") %
                              location.m_Function %
                              subgraphIndex %
                              location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% at %3%") %
                              location.m_Function %
                              subgraphIndex %
                              location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParser::ModelPtr & model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid subgraph index. "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid operator index. "
                              "subgraph:%2% operator:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              operatorIndex %
                              location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParser::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // Not checking the model here because CHECK_MODEL is assumed to have already run
    // and validated it; an assert is sufficient.
    ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // The subgraph index should likewise have been checked by CHECK_MODEL,
    // so only an assert is added here.
    ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // the tensor index is the only one to check here
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid tensor index. "
                              "subgraph:%2% tensor:%3% at %4%") %
                              location.m_Function %
                              subgraphIndex %
                              tensorIndex %
                              location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
                    const CheckLocation & location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with a null tensor pointer. "
                              "at %2%") %
                              location.m_Function %
                              location.FileLine()));

    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParser::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with invalid (null) model. "
                              "Possible reason is that the model is not yet loaded and Unpack(ed). "
                              "buffer:%2% at %3%") %
                              location.m_Function %
                              bufferIndex %
                              location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            boost::str(
                boost::format("%1% was called with an invalid buffer index. "
                              "buffer index:%2% at %3%") %
                              location.m_Function %
                              bufferIndex %
                              location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("The buffer #%1% is null. %2%") %
                              bufferIndex %
                              location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            boost::str(
                boost::format("BufferPtr is null for buffer:%1%. %2%") %
                              bufferId %
                              location.AsString()));
    }
    else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
            tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                boost::str( \
                    boost::format("TfLite parser doesn't support fused activation: " \
                                  "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \
                                  OPTION->fused_activation_function % \
                                  tflite::EnumNameActivationFunctionType(\
                                      OPTION->fused_activation_function) % \
                                  __func__ % \
                                  SUBGRAPH_INDEX % \
                                  OPERATOR_INDEX % \
                                  CHECK_LOCATION().FileLine())); \
        } \
    } while(false)


std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto & i : in)
    {
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}

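// Computes the padding for one spatial dimension following the TfLite convention:
// SAME pads so that outputSize = ceil(inputSize / stride), VALID adds no padding.
// Worked example (SAME): inputSize=224, filterSize=3, stride=2, dilation=1
// -> outputSize=112, dilatedSize=3, temp=225, paddingFront=0, paddingBack=1.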
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}

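// Builds an armnn::TensorInfo from a TfLite tensor, covering per-tensor and
// per-channel (per-axis) quantization. An empty shape becomes {1}; when
// outputTensor is true an empty shape marks the tensor as dynamic instead.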
armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
                               const std::vector<unsigned int>& shapes,
                               const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3},
                               const bool outputTensor = false)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QAsymmU8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT8:
            if (tensorPtr->quantization->zero_point.size() == 1)
            {
                // Per-tensor
                type = armnn::DataType::QAsymmS8;
            }
            else
            {
                // Per-channel
                type = armnn::DataType::QSymmS8;
            }
            break;
        case tflite::TensorType_INT16:
            type = armnn::DataType::QSymmS16;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;

        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                boost::str(
                    boost::format("Unsupported data type %1% = %2% for tensor: %3%. %4%") %
                                  tensorPtr->type %
                                  tflite::EnumNameTensorType(tensorPtr->type) %
                                  tensorPtr->name %
                                  location.AsString()));
        }
    }
    std::vector<unsigned int> safeShape = shapes;
    bool isDynamic = false;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
        if (outputTensor)
        {
            isDynamic = true;
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        if (tensorPtr->quantization->scale.size() <= 1)
        {
            CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
            CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

            if (tensorPtr->quantization->scale.size() == 1)
            {
                quantizationScale = tensorPtr->quantization->scale[0];
            }
            if (tensorPtr->quantization->zero_point.size() == 1)
            {
                // NOTE: we lose precision here when converting from 64 bit to 32
                // but this is what we support at the moment in ArmNN
                quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
            }

            TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
                                    safeShape.data());
            if (isDynamic)
            {
                tensorShape = TensorShape(1, false);
            }
            armnn::TensorInfo result(tensorShape,
                                     type,
                                     quantizationScale,
                                     quantizationOffset);
            return result;
        }
        else
        {
            std::vector<float> quantizationScales;
            std::vector<int32_t> quantizationOffsets;

            // Scale
            std::copy(tensorPtr->quantization->scale.begin(),
                      tensorPtr->quantization->scale.end(),
                      std::back_inserter(quantizationScales));

            // QSymmS8 Per-axis
            TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
                                    safeShape.data());
            if (isDynamic)
            {
                tensorShape = TensorShape(1, false);
            }
            armnn::TensorInfo result(tensorShape,
                                     type,
                                     quantizationScales,
                                     dimensionMappings[boost::numeric_cast<unsigned int>(
                                         tensorPtr->quantization->quantized_dimension)]);
            return result;
        }
    }
    else
    {
        TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
                                safeShape.data());
        if (isDynamic)
        {
            tensorShape = TensorShape(1, false);
        }
        armnn::TensorInfo result(tensorShape,
                                 type,
                                 quantizationScale,
                                 quantizationOffset);
        return result;
    }
}

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
                               const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
}

armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
                               const bool outputTensor)
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3};
    return ToTensorInfo(tensorPtr, dimensions, dimensionMappings, outputTensor);
}

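// Copies (and, when a permutation vector is supplied, permutes) the raw buffer
// contents into newly allocated storage; the returned unique_ptr owns the data
// backing the ConstTensor.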
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    IgnoreUnused(tensorPtr);
    ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    ARMNN_ASSERT_MSG(bufferPtr != nullptr,
        boost::str(
            boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // Generate the binding id by shifting the tensor id left by 8 bits
    // and adding the subgraph id, which allows for 256 subgraphs.
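    // e.g. subgraphIndex=0, tensorIndex=5 -> (5 << 8) + 0 = 0x500.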
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

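// Two quantized tensors are considered to match when they share the same data
// type and the same quantization space (scale and offset); non-quantized
// tensors are skipped.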
void CheckMatchingQuantization(const TensorInfo& first,
                               const TensorInfo& second,
                               const std::string& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (!first.IsQuantized() ||
        !second.IsQuantized())
    {
        // Not a quantized type, ignore the validation
        return;
    }

    DataType firstDataType = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
                                       secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(second.GetQuantizationScale()));
    }
}

} // <anonymous>

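// Every builtin operator code is first mapped to ParseUnsupportedOperator; the
// table below then overrides the entries this parser actually implements.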
TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParser::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParser::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParser::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParser::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParser::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
    m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParser::ParseDiv;
    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
}

void TfLiteParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}

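// Inserts a Reshape layer in front of the lower-rank input of a broadcast binary
// operation so that both inputs end up with the same number of dimensions,
// e.g. a [4] tensor broadcast against a [1, 2, 2, 4] tensor is reshaped to [1, 1, 1, 4].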
void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                            size_t operatorIndex,
                                            IConnectableLayer *layer)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    ARMNN_ASSERT(layer != nullptr);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    ARMNN_ASSERT(operatorPtr->inputs.size() > 1);

    uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
    TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
    uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[1]);
    TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();

    armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(tensorPtr);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(tensorPtr1);

    uint32_t inputSlotId = 1;
    uint32_t reshapeSlotId = 0;

    if (inputTensorInfo.GetNumDimensions() < reshapedTensorInfo.GetNumDimensions())
    {
        uint32_t id = reshapedInputId;
        reshapedInputId = inputId;
        inputId = id;

        reshapedTensorInfo = ToTensorInfo(tensorPtr1);
        inputTensorInfo = ToTensorInfo(tensorPtr);

        inputSlotId = 0;
        reshapeSlotId = 1;
    }

    uint32_t numDimensions = inputTensorInfo.GetNumDimensions();

    std::vector<unsigned> reshapedDim;
    for (unsigned int i = 0; i < reshapedTensorInfo.GetNumDimensions(); ++i)
    {
        reshapedDim.push_back(reshapedTensorInfo.GetShape()[i]);
    }

    std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
    std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());

    reshapedTensorInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });

    std::string layerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = reshapedTensorInfo.GetShape();
    armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());

    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
    reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(reshapeSlotId));

    RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});

    armnn::IInputSlot* input1Slot = &(layer->GetInputSlot(inputSlotId));
    RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
}

INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParser::CreateNetworkFromModel()
{

    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
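    // When the InferAndValidate option is set, pass it on to the network as a ShapeInferenceMethod backend option.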
    if (m_Options && m_Options.value().m_InferAndValidate)
    {
        BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                  {
                                                      { "InferAndValidate", true }
                                                  });

        networkOptions.push_back(shapeInferenceMethodOption);
    }

    m_Network = INetwork::Create(networkOptions);
    ARMNN_ASSERT(m_Model.get() != nullptr);

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            boost::str(
                boost::format("Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
                              m_Model->subgraphs.size() %
                              CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    size_t operatorIndex = 0;
    try
    {
        for (SubgraphPtr const& subgraph : m_Model->subgraphs)
        {
            m_SubgraphConnections.emplace_back(subgraph->tensors.size());
            for (OperatorPtr const& op : subgraph->operators)
            {
                auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(boost::str(boost::format("Operator code %1% is out of range 0-%2%. "
                                                                  "subgraph:%3% operator idx:%4%. %5%") %
                                                                  builtinCode % tflite::BuiltinOperator_MAX % subgraphIndex %
                                                                  operatorIndex % CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto& parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
                ++operatorIndex;
            }

            SetupInputLayers(subgraphIndex);
            SetupOutputLayers(subgraphIndex);
            SetupConstantLayers(subgraphIndex);

            ++subgraphIndex;
            operatorIndex = 0;
        }
    }
    catch (const ParseException& e)
    {
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(boost::str(
            boost::format("Another layer has already registered itself as the producer of "
                          "subgraph:%1% tensor:%2% %3%") %
                          subgraphIndex %
                          tensorIndex %
                          CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                            size_t tensorIndex,
                                            armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    // NOTE: By default we presume the custom operator is not supported
    auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;

    // Identify custom code defined for custom operator
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;

    // Find parser function that corresponds to custom code (if any)
    auto iterator = m_CustomParserFunctions.find(customCode);
    if (iterator != m_CustomParserFunctions.end())
    {
        customParserFunction = iterator->second;
    }

    // Run parser function
    (this->*customParserFunction)(subgraphIndex, operatorIndex);
}

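// Unsupported builtin operators either raise a ParseException or, when the
// StandInLayerForUnsupported option is enabled, are represented by a
// non-executable StandInLayer placeholder.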
void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
            boost::str(
                boost::format("Operator not supported. "
                              "subgraph:%1% operator:%2% "
                              "opcode_index:%3% opcode:%4% / %5% %6%") %
                              subgraphIndex %
                              operatorIndex %
                              opcodeIndex %
                              opcode %
                              tflite::EnumNameBuiltinOperator(opcode) %
                              CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());

    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
    }

    auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}

void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1], permutationVector);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);

    IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}

void TfLiteParser::ParseExp(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Exp:%1%:%2%") % subgraphIndex % operatorIndex);

    ElementwiseUnaryDescriptor desc;
    desc.m_Operation = UnaryOperation::Exp;
    IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}

void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1, 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = boost::str(boost::format("Transpose:%1%:%2%") % subgraphIndex % operatorIndex);
    TransposeDescriptor desc;

    if (inputs.size() == 2)
    {
        armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
        BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
        auto numPermVecElements = permuteTensorInfo.GetNumElements();
        std::vector<unsigned int> permuteShape(numPermVecElements);
        ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
        PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());

        desc = TransposeDescriptor(permutationVector);
    }

    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

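    // The TransposeConv inputs are: [0] the output shape, [1] the weights, [2] the input tensor.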
    if (inputs[0])
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
        std::vector<int> output_shape(tensorInfo.GetNumElements());
        if (tensorInfo.GetDataType() == DataType::Signed32)
        {
            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
        }
        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
        {
            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
            {
                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
            }
        }
        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : output_shape)
        {
            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        desc.m_OutputShapeEnabled = true;
    }
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);

    layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                      filterTensorAndData.first,
                                                      EmptyOptional(),
                                                      layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001245void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
1246{
1247 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
1248}
1249
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001250void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
1251{
1252 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1253
1254 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1255 CHECK_VALID_SIZE(inputs.size(), 3);
1256
1257 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1258 CHECK_VALID_SIZE(outputs.size(), 1);
1259
1260 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1261 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1262
1263 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1264 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1265
1266 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1267 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1268
1269 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1270 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1271
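// The crops tensor holds a (begin, end) pair per block dimension; flatten it into the
// pair list expected by BatchToSpaceNdDescriptor.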
1272 size_t step = 2;
1273 std::vector<std::pair<unsigned int, unsigned int>> crops;
1274 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1275 {
1276 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1277 }
1278
1279 armnn::BatchToSpaceNdDescriptor desc;
1280 desc.m_BlockShape = blockShape;
1281 desc.m_Crops = crops;
1282 desc.m_DataLayout = armnn::DataLayout::NHWC;
1283
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001284 auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001285
James Conroy05102392020-06-24 15:39:55 +01001286 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001287 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001288 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1289
1290 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1291 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001292 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1293
1294 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1295 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1296
1297 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1298 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1299}
1300
Matthew Jackson28c94572019-07-18 10:47:03 +01001301void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1302{
1303 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1304
1305 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1306 CHECK_VALID_SIZE(inputs.size(), 1);
1307
1308 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1309 CHECK_VALID_SIZE(outputs.size(), 1);
1310
1311 L2NormalizationDescriptor desc;
1312 desc.m_DataLayout = armnn::DataLayout::NHWC;
1313 auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1314 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1315
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001316 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001317
Sadik Armagand109a4d2020-07-28 10:42:13 +01001318 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson28c94572019-07-18 10:47:03 +01001319 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1320
1321 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1322 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1323
1324 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1325 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1326}
1327
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001328void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
1329{
1330 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
1331}
1332
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001333void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1334{
1335 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1336
1337 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1338 CHECK_VALID_SIZE(inputs.size(), 2);
1339
1340 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1341 CHECK_VALID_SIZE(outputs.size(), 1);
1342
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001343 auto layerName = boost::str(boost::format("Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001344
1345 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1346 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1347 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001348
Sadik Armagand109a4d2020-07-28 10:42:13 +01001349 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001350 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1351
1352 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1353 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001354 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1355
1356 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
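// If the two inputs have different ranks, insert a broadcast reshape on the lower-rank
// input before wiring the connections; otherwise register both inputs directly.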
1357 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1358 {
1359 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1360 }
1361 else
1362 {
1363 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1364 }
1365
1366 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1367 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1368}
1369
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001370void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1371{
1372 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1373
1374 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1375 CHECK_VALID_SIZE(inputs.size(), 2);
1376
1377 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1378 CHECK_VALID_SIZE(outputs.size(), 1);
1379
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001380 auto layerName = boost::str(boost::format("Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001381
1382 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1383 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1384 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001385
Sadik Armagand109a4d2020-07-28 10:42:13 +01001386 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001387 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1388
1389 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1390 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001391 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1392
1393 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1394 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1395 {
1396 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1397 }
1398 else
1399 {
1400 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1401 }
1402
1403 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1404 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1405}
1406
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001407void TfLiteParser::ParsePool(size_t subgraphIndex,
1408 size_t operatorIndex,
1409 PoolingAlgorithm algorithm)
1410{
1411 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1412
1413 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1414 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1415
1416 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1417
1418 std::string layerName;
1419
1420 switch (algorithm)
1421 {
1422 case PoolingAlgorithm::Average:
1423 layerName =
1424 boost::str(boost::format("AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1425 break;
1426 case PoolingAlgorithm::Max:
1427 layerName =
1428 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1429 break;
1430 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001431 ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001432 }
1433
1434 Pooling2dDescriptor desc;
1435
1436 desc.m_PoolType = algorithm;
1437 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1438 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1439 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1440 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
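// PaddingMethod::Exclude matches TfLite pooling semantics: padded elements are not
// counted when computing an average.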
1441 desc.m_PaddingMethod = PaddingMethod::Exclude;
1442 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001443 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001444
1445 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1446 CHECK_VALID_SIZE(inputs.size(), 1);
1447 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1448
1449 // assuming input is NHWC
1450 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1451 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1452
Pablo Tellof0bd6832019-04-26 17:58:13 +01001453 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1454 desc.m_PadTop, desc.m_PadBottom, options->padding);
1455 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1456 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001457
1458 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1459 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001460
Sadik Armagand109a4d2020-07-28 10:42:13 +01001461 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001462 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1463
1464 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1465 ARMNN_ASSERT(layer != nullptr);
jimfly01c25411c2018-11-14 17:47:22 +00001466 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001467
1468 // register the input connection slots for the layer, connections are made after all layers have been created
1469 // only the tensors for the inputs are relevant, exclude the const tensors
1470 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001471 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001472
jimfly01c25411c2018-11-14 17:47:22 +00001473 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001474 // register the output connection slots for the layer, connections are made after all layers have been created
1475 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1476 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1477}
1478
josh minorba424d22019-11-13 10:55:17 -06001479void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
1480{
1481 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1482
1483 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1484 CHECK_VALID_SIZE(inputs.size(), 3);
1485 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1486 CHECK_VALID_SIZE(outputs.size(), 1);
1487
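// TfLite SLICE supplies two constant input tensors: per-dimension 'begin' offsets and
// 'size' extents; both are copied out of the flatbuffer into the descriptor.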
1488 SliceDescriptor desc;
1489
1490 // set begin tensor info for slice descriptor
1491 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1492 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1493
1494 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1495 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1496
1497 // set size tensor info for slice descriptor
1498 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1499 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1500
1501 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1502 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1503 desc = SliceDescriptor(begin, size);
1504
1505 auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06001506
James Conroy05102392020-06-24 15:39:55 +01001507 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001508 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001509 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1510
1511 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
josh minorba424d22019-11-13 10:55:17 -06001512 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1513
1514 // register the input connection slots for the layer, connections are made after all layers have been created
1515 // only the tensors for the inputs are relevant, exclude the const tensors
1516 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1517 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1518
1519 // register the output connection slots for the layer, connections are made after all layers have been created
1520 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1521 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1522}
1523
telsoa01c577f2c2018-08-31 09:22:23 +01001524void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1525{
1526 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1527 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1528 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1529
1530 SoftmaxDescriptor desc;
1531 desc.m_Beta = options->beta;
1532
1533 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1534 CHECK_VALID_SIZE(inputs.size(), 1);
1535 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1536 CHECK_VALID_SIZE(outputs.size(), 1);
1537
1538 auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1539 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1540
Sadik Armagand109a4d2020-07-28 10:42:13 +01001541 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
telsoa01c577f2c2018-08-31 09:22:23 +01001542 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1543
1544 // register the input connection slots for the layer, connections are made after all layers have been created
1545 // only the tensors for the inputs are relevant, exclude the const tensors
1546 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1547 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1548
1549 // register the output connection slots for the layer, connections are made after all layers have been created
1550 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1551 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1552}
1553
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001554void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1555{
1556 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1557
1558 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1559 CHECK_VALID_SIZE(inputs.size(), 3);
1560
1561 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1562 CHECK_VALID_SIZE(outputs.size(), 1);
1563
1564 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1565 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1566
1567 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1568 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1569
1570 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1571 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1572
1573 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1574 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1575
1576 size_t step = 2;
1577 std::vector<std::pair<unsigned int, unsigned int>> padList;
1578 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1579 {
1580 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1581 }
1582
1583 armnn::SpaceToBatchNdDescriptor desc;
1584 desc.m_BlockShape = blockShape;
1585 desc.m_PadList = padList;
1586 desc.m_DataLayout = armnn::DataLayout::NHWC;
1587
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001588 auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001589
James Conroy05102392020-06-24 15:39:55 +01001590 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001591 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001592 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1593
1594 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1595 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001596 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1597
1598 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1599 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1600
1601 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1602 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1603}
1604
telsoa01c577f2c2018-08-31 09:22:23 +01001605armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1606 const armnn::TensorInfo & inputTensorInfo)
1607{
1608 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1609 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1610 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1611
1612 if (inputTensorInfo.GetNumDimensions() > 4)
1613 {
1614 std::stringstream ss;
1615 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1616 << " shape:" << inputTensorInfo.GetShape() << " "
1617 << CHECK_LOCATION().AsString();
1618 throw ParseException(ss.str());
1619 }
1620
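// An empty squeeze_dims list means every axis is a candidate: any dimension of size 1
// is removed from the output shape.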
1621 if (squeezeDims.empty())
1622 {
1623 squeezeDims.assign(dimensionSequence,
1624 dimensionSequence+inputTensorInfo.GetNumDimensions());
1625 }
1626
1627 std::vector<uint32_t> outputDims;
1628 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1629 {
1630 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1631 auto currentDimension = inputTensorInfo.GetShape()[i];
1632 if (skipSqueeze || currentDimension != 1)
1633 {
1634 outputDims.push_back(currentDimension);
1635 }
1636 }
1637
1638 if (outputDims.size() > 4)
1639 {
1640 std::stringstream ss;
1641 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1642 << " shape:" << inputTensorInfo.GetShape() << " "
1643 << CHECK_LOCATION().AsString();
1644 throw ParseException(ss.str());
1645 }
1646
1647 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1648 outputDims.data());
1649
1650 // we need to preserve the tensor type and the quantization data as well
1651 TensorInfo outTensorInfo = inputTensorInfo;
1652 outTensorInfo.SetShape(outShape);
1653
1654 return outTensorInfo;
1655}
1656
1657void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1658{
1659 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1660
1661 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1662 CHECK_VALID_SIZE(inputs.size(), 1);
1663
1664 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1665 CHECK_VALID_SIZE(outputs.size(), 1);
1666
1667 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1668 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
James Conroy05102392020-06-24 15:39:55 +01001669 auto layerName = boost::str(boost::format("Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001670
1671 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1672 armnn::TensorInfo outputTensorInfo =
1673 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1674 inputTensorInfo);
James Conroy05102392020-06-24 15:39:55 +01001675 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
telsoa01c577f2c2018-08-31 09:22:23 +01001676
1677 ReshapeDescriptor reshapeDesc;
1678 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1679
telsoa01c577f2c2018-08-31 09:22:23 +01001680 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001681 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001682 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1683
1684 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1685 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1686
1687 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1688 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1689}
1690
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001691void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
1692{
1693 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1694
1695 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1696 CHECK_VALID_SIZE(inputs.size(), 4);
1697
1698 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1699 CHECK_VALID_SIZE(outputs.size(), 1);
1700
1701 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1702 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1703
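// The masks come straight from the operator options; the begin/end/stride vectors are
// read from the three constant input tensors below.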
1704 StridedSliceDescriptor desc;
1705 desc.m_BeginMask = options->begin_mask;
1706 desc.m_EllipsisMask = options->ellipsis_mask;
1707 desc.m_EndMask = options->end_mask;
1708 desc.m_NewAxisMask = options->new_axis_mask;
1709 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1710 desc.m_DataLayout = armnn::DataLayout::NHWC;
1711
1712 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1713 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1714
1715 std::vector<int> begin(beginTensorInfo.GetNumElements());
1716 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1717
1718 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1719 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1720
1721 std::vector<int> end(endTensorInfo.GetNumElements());
1722 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1723
1724 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1725 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1726
1727 std::vector<int> stride(strideTensorInfo.GetNumElements());
1728 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1729
1730 desc.m_Begin = begin;
1731 desc.m_End = end;
1732 desc.m_Stride = stride;
1733
1734 auto layerName = boost::str(boost::format("StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1735 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001736 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001737
Sadik Armagand109a4d2020-07-28 10:42:13 +01001738 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001739 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1740
1741 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1742 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1743
1744 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1745 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1746}
1747
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001748void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1749{
1750 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1751
1752 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1753 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1754
1755 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1756 CHECK_VALID_SIZE(inputs.size(), 2);
1757
1758 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1759 CHECK_VALID_SIZE(outputs.size(), 1);
1760
1761 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1762 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1763
1764 auto layerName = boost::str(boost::format("Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1765 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001766 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001767
Sadik Armagand109a4d2020-07-28 10:42:13 +01001768 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001769 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1770
1771 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1772 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1773 {
1774 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1775 }
1776 else
1777 {
1778 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1779 }
1780
1781 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1782
1783 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1784 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1785}
1786
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301787void TfLiteParser::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
1788{
1789 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1790
1791 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1792 const auto * options = operatorPtr->builtin_options.AsDivOptions();
1793
1794 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1795 CHECK_VALID_SIZE(inputs.size(), 2);
1796
1797 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1798 CHECK_VALID_SIZE(outputs.size(), 1);
1799
1800 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1801 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1802
1803 auto layerName = boost::str(boost::format("Div:%1%:%2%") % subgraphIndex % operatorIndex);
1804 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001805 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301806
Sadik Armagand109a4d2020-07-28 10:42:13 +01001807 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301808 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1809
1810 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1811 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1812 {
1813 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1814 }
1815 else
1816 {
1817 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1818 }
1819
1820 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1821
1822 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1823 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1824}
1825
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001826void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1827{
1828 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1829
1830 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1831 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1832
1833 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1834 CHECK_VALID_SIZE(inputs.size(), 2);
1835
1836 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1837 CHECK_VALID_SIZE(outputs.size(), 1);
1838
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001839 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1840 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1841
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001842 auto layerName = boost::str(boost::format("Add:%1%:%2%") % subgraphIndex % operatorIndex);
1843 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001844 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001845
Sadik Armagand109a4d2020-07-28 10:42:13 +01001846 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001847 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1848
1849 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001850 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1851 {
1852 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1853 }
1854 else
1855 {
1856 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1857 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001858
1859 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1860
1861 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1862 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1863}
1864
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001865void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1866{
1867 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1868
1869 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1870 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1871
1872 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1873 CHECK_VALID_SIZE(inputs.size(), 2);
1874
1875 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1876 CHECK_VALID_SIZE(outputs.size(), 1);
1877
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001878 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1879 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1880
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001881 auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1882 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001883 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001884
Sadik Armagand109a4d2020-07-28 10:42:13 +01001885 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001886 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1887
1888 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001889 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
1890 {
1891 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1892 }
1893 else
1894 {
1895 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1896 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001897
1898 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1899
1900 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1901 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1902}
1903
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001904void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1905{
1906 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1907
1908 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1909 CHECK_VALID_SIZE(inputs.size(), 2);
1910 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1911 CHECK_VALID_SIZE(outputs.size(), 1);
1912
1913 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1914 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1915
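// The second input is a constant axis tensor; copy it into the descriptor's reduction axes.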
1916 armnn::MeanDescriptor desc;
1917 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1918 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1919 desc.m_Axis = axis;
1920
1921 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001922 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001923
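// keep_dims is inferred by comparing input and output ranks rather than read from the
// operator options.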
1924 desc.m_KeepDims =
1925 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();
1927
1928 auto layerName = boost::str(boost::format("Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1929 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001930 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001931
1932 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1933
1934 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1935 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1936
1937 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1938 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1939}
1940
Darshan Patel83fcf982020-05-26 22:22:42 +05301941void TfLiteParser::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
1942{
1943 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1944
1945 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1946 CHECK_VALID_SIZE(inputs.size(), 1);
1947
1948 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1949 CHECK_VALID_SIZE(outputs.size(), 1);
1950
1951 auto layerName = boost::str(boost::format("Neg:%1%:%2%") % subgraphIndex % operatorIndex);
1952 armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Neg);
1953 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
1954 ARMNN_ASSERT(layer != nullptr);
1955
Sadik Armagand109a4d2020-07-28 10:42:13 +01001956 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel83fcf982020-05-26 22:22:42 +05301957 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1958
1959 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1960 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1961
1962 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1963 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1964}
1965
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001966void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1967{
1968 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1969
1970 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1971 CHECK_VALID_SIZE(inputs.size(), 2);
1972 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1973 CHECK_VALID_SIZE(outputs.size(), 1);
1974
1975 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1976 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1977
1978 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1979 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1980
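// The paddings tensor stores a (before, after) pair for every input dimension.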
1981 size_t step = 2;
1982 armnn::PadDescriptor desc;
1983 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1984 {
1985 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1986 }
1987
1988 auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001989 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001990
1991 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1992 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001993 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1994
1995 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1996 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1997
1998 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1999 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2000}
2001
Sadik Armagan66dedc72019-12-10 16:32:07 +00002002void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
2003{
2004 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2005
2006 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2007 CHECK_VALID_SIZE(inputs.size(), 1);
2008
2009 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2010 CHECK_VALID_SIZE(outputs.size(), 1);
2011
2012 auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
2013
2014 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002015 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002016
Sadik Armagand109a4d2020-07-28 10:42:13 +01002017 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002018 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2019
2020 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2021 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2022
2023 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2024 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2025}
Finn Williamsc42c3842019-01-22 14:18:11 +00002026
Sadik Armagan58f39192018-09-17 14:14:39 +01002027void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
2028{
Finn Williamsc42c3842019-01-22 14:18:11 +00002029 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01002030}
2031
2032void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
2033{
Finn Williamsc42c3842019-01-22 14:18:11 +00002034 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::BoundedReLu);
2035}
Sadik Armagan58f39192018-09-17 14:14:39 +01002036
Sadik Armagan12239e72020-05-27 11:06:17 +01002037void TfLiteParser::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
2038{
Jan Eilers2f746b32020-07-28 14:00:06 +01002039 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
Sadik Armagan12239e72020-05-27 11:06:17 +01002040}
2041
Finn Williamsc42c3842019-01-22 14:18:11 +00002042void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
2043{
2044 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Sigmoid);
2045}
2046
Nina Drozd99851762019-04-09 09:37:38 +01002047void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
2048{
2049 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::TanH);
2050}
2051
Jan Eilers2f746b32020-07-28 14:00:06 +01002052void TfLiteParser::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
2053{
2054 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
2055}
Finn Williamsc42c3842019-01-22 14:18:11 +00002056
2057void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
2058{
2059 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01002060 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Jan Eilers8eb25602020-03-09 12:13:48 +00002061 IgnoreUnused(operatorPtr);
Sadik Armagan58f39192018-09-17 14:14:39 +01002062
2063 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2064 CHECK_VALID_SIZE(inputs.size(), 1);
2065
2066 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2067 CHECK_VALID_SIZE(outputs.size(), 1);
2068
Finn Williamsc42c3842019-01-22 14:18:11 +00002069 auto layerName = str(boost::format("Activation:"));
Sadik Armagan58f39192018-09-17 14:14:39 +01002070 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00002071 activationDesc.m_Function = activationType;
2072
2073 switch (activationType)
2074 {
2075 case ActivationFunction::ReLu:
2076 {
2077 layerName += str(boost::format("RELU:%1%:%2%") % subgraphIndex % operatorIndex);
2078 break;
2079 }
2080 case ActivationFunction::BoundedReLu:
2081 {
2082 layerName += str(boost::format("RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
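// BoundedReLu with a = 6 and b = 0 clamps the output to [0, 6], i.e. RELU6.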
2083 activationDesc.m_A = 6.0f;
2084 activationDesc.m_B = 0.0f;
2085 break;
2086 }
2087 case ActivationFunction::Sigmoid:
2088 {
2089 layerName += str(boost::format("SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
2090 break;
2091 }
Nina Drozd99851762019-04-09 09:37:38 +01002092 case ActivationFunction::TanH:
2093 {
2094 layerName += str(boost::format("TANH:%1%:%2%") % subgraphIndex % operatorIndex);
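// ArmNN TanH computes a * tanh(b * x); a = b = 1 gives the standard tanh.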
2095 activationDesc.m_A = 1.0f;
2096 activationDesc.m_B = 1.0f;
2097 break;
2098 }
Sadik Armagan12239e72020-05-27 11:06:17 +01002099 case ActivationFunction::LeakyReLu:
2100 {
2101 layerName += str(boost::format("LEAKYRELU:%1%:%2%") % subgraphIndex % operatorIndex);
2102 const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
2103 activationDesc.m_A = options->alpha;
2104 break;
2105 }
Jan Eilers2f746b32020-07-28 14:00:06 +01002106 case ActivationFunction::HardSwish:
2107 layerName += str(boost::format("HARDSWISH:%1%:%2%") % subgraphIndex % operatorIndex);
2108 break;
Finn Williamsc42c3842019-01-22 14:18:11 +00002109 default:
2110 {
2111 throw ParseException(
2112 boost::str(boost::format("Unexpected ActivationFunction[%1%] when creating layerName "
2113 " %2% ") %static_cast<int>(activationType)% CHECK_LOCATION().AsString()));
2114 }
2115 }
2116
2117 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01002118
Sadik Armagand109a4d2020-07-28 10:42:13 +01002119 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan58f39192018-09-17 14:14:39 +01002120 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2121
2122 // register the input connection slots for the layer, connections are made after all layers have been created
2123 // only the tensors for the inputs are relevant, exclude the const tensors
2124 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2125 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2126
2127 // register the output connection slots for the layer, connections are made after all layers have been created
2128 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2129 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2130}
Sadikb94967b2018-09-19 15:30:00 +01002131armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
2132 const std::vector<int32_t> & targetDimsIn)
2133{
2134 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2135 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2136
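// A single -1 entry in the target shape acts as a wildcard whose size is inferred from
// the input's total element count.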
2137 if (stretchDim != targetDimsIn.end())
2138 {
2139 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2140 {
2141 throw ParseException(
2142 boost::str(
2143 boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
2144 }
2145
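// Initialising the product with -1 cancels the wildcard's own -1, leaving the product
// of the explicitly specified dimensions.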
2146 auto targetNumElements =
2147 boost::numeric_cast<unsigned int>(
2148 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2149
2150 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2151 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2152 }
2153
2154 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2155
2156 TensorInfo reshapeInfo = inputTensorInfo;
2157 reshapeInfo.SetShape(outputShape);
2158
2159 return reshapeInfo;
2160}
2161
2162void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
2163{
2164 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2165
2166 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01002167
2168 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2169 CHECK_VALID_SIZE(outputs.size(), 1);
2170
2171 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2172 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
James Conroy05102392020-06-24 15:39:55 +01002173 auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01002174
2175 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00002176 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
James Conroy05102392020-06-24 15:39:55 +01002177 CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");
Derek Lambertic9e52792020-03-11 11:42:26 +00002178
Jan Eilersbac9b352020-07-13 13:40:24 +01002179 // Extracting new shape for the output
2180 // There are two ways it can be passed
2181 // * First is to define the target shape in the operator built-in options
2182 // * Second is to pass it as a second input tensor
Derek Lambertic9e52792020-03-11 11:42:26 +00002183 std::vector<int32_t> targetShape;
Jan Eilersbac9b352020-07-13 13:40:24 +01002184 bool targetShapeFound = false;
2185 // Check if built-in options were given
2186 if (options != nullptr)
Derek Lambertic9e52792020-03-11 11:42:26 +00002187 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002188 // make sure the parameter is given
2189 if (options->new_shape.empty() == false)
Derek Lambertic9e52792020-03-11 11:42:26 +00002190 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002191 targetShape = options->new_shape;
2192 targetShapeFound = true;
Derek Lambertif4a953f2020-03-17 14:25:57 +00002193 }
Derek Lambertic9e52792020-03-11 11:42:26 +00002194 }
Jan Eilersbac9b352020-07-13 13:40:24 +01002195
2196 // If there is no built-in option given or if the built-in new_shape parameter was empty
2197 if (!targetShapeFound)
Derek Lambertic9e52792020-03-11 11:42:26 +00002198 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002199 // Check for a second input tensor
2200 if (inputs.size() > 1 && inputs[1] != nullptr)
2201 {
2202 if (inputs[1]->is_variable)
2203 {
2204 ARMNN_THROW_PARSE_EXCEPTION("Target shapes defined in non-const input tensors are not supported");
2205 }
2206
2207 if (inputs[1]->shape.size() != 1)
2208 {
2209 ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
2210 }
2211
2212 if (inputs[1]->type != tflite::TensorType_INT32)
2213 {
2214 ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
2215 }
2216
2217 // Extract target shape from input
2218 auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2219 auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
2220 for (int i = 0; i < inputs[1]->shape[0]; ++i)
2221 {
2222 targetShape.push_back(values[i]);
2223 }
2224 }
2225 else
Derek Lambertic9e52792020-03-11 11:42:26 +00002226 {
2227 ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
2228 "At least one method required");
2229 }
Derek Lambertic9e52792020-03-11 11:42:26 +00002230 }
2231
kevmay0171972a82018-12-17 14:28:03 +00002232 armnn::TensorInfo reshapeOutputTensorInfo =
Derek Lambertic9e52792020-03-11 11:42:26 +00002233 TfLiteParser::OutputShapeOfReshape(inputTensorInfo, targetShape);
Sadikb94967b2018-09-19 15:30:00 +01002234
kevmay0171972a82018-12-17 14:28:03 +00002235 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00002236 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
2237 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00002238 {
2239 std::stringstream ss;
2240 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00002241 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00002242 << " does not equal output shape "
2243 << actualOutputTensorInfo.GetShape()
2244 << ": "
2245 << CHECK_LOCATION().AsString();
2246 throw ParseException(ss.str());
2247 }
2248
Sadikb94967b2018-09-19 15:30:00 +01002249 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00002250 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01002251
Sadikb94967b2018-09-19 15:30:00 +01002252 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002253 ARMNN_ASSERT(layer != nullptr);
kevmay0171972a82018-12-17 14:28:03 +00002254 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01002255
2256 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2257 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2258
2259 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2260 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2261}
2262
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002263void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
2264{
Sadik Armagana3b31f02019-12-05 09:08:53 +00002265 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
2266}
2267
2268void TfLiteParser::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
2269{
2270 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
2271}
2272
2273void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
2274{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002275 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2276
2277 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2278 CHECK_VALID_SIZE(inputs.size(), 2);
2279
2280 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2281 CHECK_VALID_SIZE(outputs.size(), 1);
2282
2283 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
2284
2285 // Data for the parsed tensor args (size) must be stored locally.
2286 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2287
2288 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2289 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2290
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002291 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002292 desc.m_Method = resizeMethod;
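// The constant size tensor holds the requested output extent as [newHeight, newWidth].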
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002293 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002294 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2295 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002296
Sadik Armagana3b31f02019-12-05 09:08:53 +00002297 auto layerName = str(boost::format("Resize:"));
2298
2299 switch (resizeMethod)
2300 {
2301 case ResizeMethod::Bilinear:
2302 {
2303 layerName += str(boost::format("BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002304
2305 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2306 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2307
David Monahan4a0c9b92020-05-30 09:48:39 +01002308 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002309 break;
2310 }
2311 case ResizeMethod::NearestNeighbor:
2312 {
2313 layerName += str(boost::format("NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
2314 break;
2315 }
2316 default:
2317 {
2318 throw ParseException(
2319 boost::str(boost::format("Unexpected ResizeMethod[%1%] when creating layerName "
2320 " %2% ") %static_cast<int>(resizeMethod)% CHECK_LOCATION().AsString()));
2321 }
2322 }
2323
James Conroy05102392020-06-24 15:39:55 +01002324 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002325 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002326 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2327
2328 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
2329 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002330 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2331
2332 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2333 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2334
2335 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2336 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2337}
2338
Sadik Armagan479045b2018-10-01 11:51:37 +01002339void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
2340{
2341 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2342
2343 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2344 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
2345
2346 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2347
2348 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2349 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2350 CHECK_VALID_SIZE(outputs.size(), 1);
2351
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002352 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
2353 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01002354
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002355 const unsigned int concatDimInput = static_cast<unsigned int>(
2356 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
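    // The modulo arithmetic wraps a negative TFLite axis into the range [0, inputRank),
    // e.g. inputRank = 4 and axis = -1 gives concatDimInput = (4 - 1) % 4 = 3 (the last dimension).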
Sadik Armagan479045b2018-10-01 11:51:37 +01002357
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002358 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2359 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01002360
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002361 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01002362
2363 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2364 {
2365 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
2366
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002367         // This sets up the concatDescriptor view origin
2368 armnnUtils::ProcessConcatInputTensorInfo(
2369 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01002370 }
2371
2372 auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002373 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002374
Jim Flynn906f9462019-05-10 13:55:21 +01002375 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002376 ARMNN_ASSERT(layer != nullptr);
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002377 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01002378
James Conroy05102392020-06-24 15:39:55 +01002379 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002380 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01002381
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002382 // add fused activation layer
2383 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01002384
Sadik Armagan479045b2018-10-01 11:51:37 +01002385 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2386 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2387}
2388
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002389void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
2390{
2391 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2392
2393 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2394 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2395
2396 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2397
2398 FullyConnectedDescriptor desc;
2399 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002400 desc.m_TransposeWeightMatrix = true;
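    // TFLite lays out fully connected weights as [output_size, input_size], hence the transpose flag.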
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002401
2402 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2403 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2404 CHECK_VALID_SIZE(outputs.size(), 1);
2405
2406 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2407
2408 // Fully Connected Layer accepts two dimensional weights input
2409 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2410 if (weightsDimension != 2)
2411 {
2412 throw ParseException(
2413 boost::str(
2414 boost::format(
2415 "Dimension %1% for Fully Connected weights is not supported by Armnn. "
2416 "Node %2%")
2417 % weightsDimension
2418 % CHECK_LOCATION().AsString()));
2419 }
2420
Matteo Martincigh747ef822018-12-18 09:26:39 +00002421 auto filterTensorAndData = CreateConstTensor(inputs[1],
2422 filterTensorInfo,
2423 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002424 armnn::IConnectableLayer* layer = nullptr;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002425 auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
2426
2427 if (inputs.size() == 3)
2428 {
2429 desc.m_BiasEnabled = true;
2430 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002431 auto biasTensorAndData = CreateConstTensor(inputs[2],
2432 biasTensorInfo,
2433 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002434 layer = m_Network->AddFullyConnectedLayer(desc,
2435 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002436 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002437 layerName.c_str());
2438 }
2439 else
2440 {
2441 layer = m_Network->AddFullyConnectedLayer(desc,
2442 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002443 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002444 layerName.c_str());
2445 }
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002446 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002447
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002448 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2449
2450 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2451
2452 if (inputTensorInfo.GetNumDimensions() > 2)
2453 {
2454 // Add reshape to flatten to 2D [batch_size, input_size],
2455 // where "input_size" corresponds to the number of inputs to the layer,
2456 // matching the second dimension of weights,
2457 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2458 std::vector<unsigned int> reshapedDimensions(2);
2459 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2460 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
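        // For example, an input of shape [1, 2, 2, 3] (12 elements) with weights of shape [N, 6]
        // is flattened to [2, 6] before being fed to the fully connected layer.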
2461
2462 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2463 {
2464 throw ParseException(
2465 boost::str(
2466 boost::format(
2467                     "Failed to deduce input tensor shape from filter size %1% %2%")
2468 % reshapedDimensions[1]
2469 % CHECK_LOCATION().AsString()));
2470 }
2471
2472 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2473 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2474
2475 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2476 armnn::ReshapeDescriptor desc;
2477 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2478         armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
2479
2480 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2481 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2482
2483 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2484 }
2485 else
2486 {
2487 // register the input connection slot for the layer
2488 // only the tensors for the inputs are relevant, exclude the const tensors
2489 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2490 }
2491
Sadik Armagand109a4d2020-07-28 10:42:13 +01002492 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002493 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2494
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002495 // we need to add the activation layer and fortunately we don't need to care about the data layout
2496 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2497 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002498
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002499 // register the output connection slots for the layer, connections are made after all layers have been created
2500 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2501 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2502}
2503
keidav011b3e2ea2019-02-21 10:07:37 +00002504void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
2505{
2506 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2507
2508 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2509
2510 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2511 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2512 CHECK_VALID_SIZE(outputs.size(), 4);
2513
2514 // Obtain custom options from flexbuffers
2515 auto custom_options = operatorPtr->custom_options;
2516 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
2517
2518 // Obtain descriptor information from tf lite
2519 DetectionPostProcessDescriptor desc;
2520 desc.m_MaxDetections = m["max_detections"].AsUInt32();
2521 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
2522 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
2523 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
2524 desc.m_NumClasses = m["num_classes"].AsUInt32();
2525 desc.m_ScaleH = m["h_scale"].AsFloat();
2526 desc.m_ScaleW = m["w_scale"].AsFloat();
2527 desc.m_ScaleX = m["x_scale"].AsFloat();
2528 desc.m_ScaleY = m["y_scale"].AsFloat();
2529
keidav0107d58c72019-02-26 11:57:39 +00002530 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00002531 {
keidav0107d58c72019-02-26 11:57:39 +00002532 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00002533 }
2534 if (!(m["detections_per_class"].IsNull()))
2535 {
2536 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
2537 }
2538
2539 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
2540 {
2541 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
2542 "must be positive and less than or equal to 1.");
2543 }
2544
2545 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
2546 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
2547 armnn::Optional<armnn::PermutationVector&>());
2548
2549 auto layerName = boost::str(boost::format("DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
2550 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
2551 layerName.c_str());
2552
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002553 ARMNN_ASSERT(layer != nullptr);
keidav011b3e2ea2019-02-21 10:07:37 +00002554
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002555 // The model does not specify the output shapes.
2556     // The output shapes are calculated from max_detections and max_classes_per_detection.
2557 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
2558 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2559 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2560 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2561 m_OverridenOutputShapes.push_back({ 1 });
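    // In TFLite order these correspond to: detection boxes [1, numDetectedBox, 4],
    // detection classes [1, numDetectedBox], detection scores [1, numDetectedBox]
    // and the number of valid detections [1].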
2562
keidav011b3e2ea2019-02-21 10:07:37 +00002563 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
2564 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002565 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00002566 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
2567 }
2568
2569 // Register the input connection slots for the layer, connections are made after all layers have been created
2570 // only the tensors for the inputs are relevant, exclude the const tensors
2571 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2572 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2573
2574 // Register the output connection slots for the layer, connections are made after all layers have been created
2575 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2576 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2577 outputTensorIndexes[1],
2578 outputTensorIndexes[2],
2579 outputTensorIndexes[3]});
2580}
2581
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002582/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2583void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2584{
2585 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2586
2587 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2588 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2589 CHECK_VALID_SIZE(outputs.size(), 1);
2590
2591 if (inputs.size() < 1)
2592 {
2593 throw ParseException("Pack must have at least one input.");
2594 }
2595
2596 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2597 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2598
2599 StackDescriptor desc;
2600 desc.m_Axis = static_cast<uint32_t>(options->axis);
2601 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2602
2603 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2604 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2605 desc.m_InputShape = inputTensorInfo.GetShape();
2606
2607 auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2608 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2609
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002610 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002611
Sadik Armagand109a4d2020-07-28 10:42:13 +01002612 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002613 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2614
2615 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2616 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2617
2618 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2619 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2620}
2621
Nina Drozd200e3802019-04-15 09:47:39 +01002622void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2623{
2624 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2625
2626 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2627 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2628
2629 // This unpackAxis indicates the axis to unpack
2630     // unpackAxis indicates the axis along which the input is unpacked
2631
2632 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2633 CHECK_VALID_SIZE(inputs.size(), 1);
2634
2635 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002636
2637 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2638 {
2639 throw ParseException(
2640 boost::str(
2641 boost::format(
2642 "The unpack axis: %1% cannot be greater than or equal to "
2643                     "the number of input dimensions %2% %3%")
2644 % unpackAxis
2645 % inputTensorInfo.GetNumDimensions()
2646 % CHECK_LOCATION().AsString()));
2647 }
2648
Nina Drozd200e3802019-04-15 09:47:39 +01002649 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2650 // If num is not defined, automatically infer from the length of the dimension axis.
2651 if(unpackNum == 0)
2652 {
2653 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2654 }
2655
2656 // If unpack number cannot be inferred and is still zero, throw ParseException.
2657 if(unpackNum == 0)
2658 {
2659         throw ParseException("Number to unpack must be greater than zero.");
2660 }
2661
2662 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2663 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2664
2665 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2666 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2667
2668 // Add current input shape to unpackDimSizes
2669 for (unsigned int i = 0; i < inputDimSize; ++i)
2670 {
2671 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2672 }
2673
2674 if (unpackDimSizes[unpackAxis] != unpackNum)
2675 {
2676         throw ParseException("Number to unpack must be the same as the length of the dimension to "
2677 "unpack along.");
2678 }
2679
2680 unpackDimSizes[unpackAxis] /= unpackNum;
2681
2682 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2683 for (unsigned int j = 0; j < unpackNum; ++j)
2684 {
2685 // Set the size of the views.
2686 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2687 {
2688 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2689 }
2690 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2691 }
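    // For example, unpacking a [4, 3] input along axis 0 produces four splitter views of shape [1, 3];
    // the reshape layers added below then squeeze each view to the expected [3] output shape.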
2692
2693 auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2694 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002695 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01002696
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002697 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2698 unpackDimSizes.data());
2699
Nina Drozd200e3802019-04-15 09:47:39 +01002700 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2701 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2702
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002703 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2704 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2705 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002706 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002707 std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
2708 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002709 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002710         armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
2711
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002712 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2713 outputTensorInfo.GetDataType(),
2714 outputTensorInfo.GetQuantizationScale(),
2715 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002716 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2717
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002718 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002719
2720 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2721 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2722 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2723 }
Nina Drozd200e3802019-04-15 09:47:39 +01002724}
2725
Nina Drozd0324f482019-04-08 10:52:10 +01002726void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2727{
2728 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2729
2730 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2731 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2732
2733 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2734
Nina Drozd200e3802019-04-15 09:47:39 +01002735     // num_splits must be specified in the options and be greater than zero.
2736 if(numSplits == 0)
2737 {
2738         throw ParseException("Number of splits must be greater than zero.");
2739 }
2740
Nina Drozd0324f482019-04-08 10:52:10 +01002741 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2742 CHECK_VALID_SIZE(inputs.size(), 2);
2743 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2744 CHECK_VALID_SIZE(outputs.size(), numSplits);
2745
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002746 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2747 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002748
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002749 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2750 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2751 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2752
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002753 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002754 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002755
Nina Drozd0324f482019-04-08 10:52:10 +01002756 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002757 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002758 {
2759 throw ParseException(
2760 boost::str(
2761 boost::format(
2762 "The number of dimensions: %1% for input tensors of the "
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002763 "split op cannot be greater than %2% %3%")
Nina Drozd0324f482019-04-08 10:52:10 +01002764 % inputTensorInfo.GetNumDimensions()
2765 % MaxNumOfTensorDimensions
2766 % CHECK_LOCATION().AsString()));
2767 }
2768
2769 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2770
2771 // Add current input shape to splitterDimSizes
2772 for (unsigned int i = 0; i < inputDimSize; ++i)
2773 {
2774 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2775 }
2776
2777 if (splitterDimSizes[splitDim] % numSplits != 0)
2778 {
2779 throw ParseException("Number of splits must evenly divide the dimension");
2780 }
2781 splitterDimSizes[splitDim] /= numSplits;
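    // For example, splitting an input of shape [1, 8, 8, 6] three ways along axis 3
    // gives splitterDimSizes = [1, 8, 8, 2], i.e. each view has shape [1, 8, 8, 2].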
2782
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002783 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002784 for (unsigned int j = 0; j < numSplits; ++j)
2785 {
2786 // Set the size of the views.
2787 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2788 {
2789 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2790 }
2791 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2792 }
2793
2794 auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
2795 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002796 ARMNN_ASSERT(layer != nullptr);
Nina Drozd0324f482019-04-08 10:52:10 +01002797
2798 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002799 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002800
Nina Drozd0324f482019-04-08 10:52:10 +01002801 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2802 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002803 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01002804 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01002805 }
2806
2807 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2808 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2809}
2810
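// Maps a possibly negative TFLite axis index onto the range [0, numDimsIn),
// e.g. ComputeWrappedIndex(-1, 4) == 3 and ComputeWrappedIndex(2, 4) == 2.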
Derek Lambertif0176992020-04-28 13:37:49 +01002811unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
2812{
2813 int numDims = armnn::numeric_cast<int>(numDimsIn);
2814 int v = idx < 0 ? numDims + idx : idx;
2815 ARMNN_ASSERT(v >= 0);
2816 ARMNN_ASSERT(v < numDims);
2817
2818 return static_cast<unsigned int>(v);
2819}
2820
2821void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
2822{
2823 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2824
2825 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Ryan OShea86704732020-05-26 11:41:04 +01002826 const auto * options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01002827
2828 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2829 CHECK_VALID_SIZE(inputs.size(), 3);
2830
2831 auto& inputTensor = inputs[0];
2832 auto& splitsTensor = inputs[1];
2833 auto& axisTensor = inputs[2];
2834
2835 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
2836 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
2837 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
2838 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
2839
2840 // Inputs
2841 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2842 if (inputDimSize > MaxNumOfTensorDimensions)
2843 {
2844 throw ParseException(
2845 boost::str(
2846 boost::format(
2847 "The number of dimensions: %1% for input tensors of the "
Jan Eilersc0761e92020-06-29 16:48:44 +01002848 "SplitV op cannot be greater than %2% %3%")
Derek Lambertif0176992020-04-28 13:37:49 +01002849 % inputTensorInfo.GetNumDimensions()
2850 % MaxNumOfTensorDimensions
2851 % CHECK_LOCATION().AsString()));
2852 }
2853
2854 // Get split axis
2855 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
2856 std::vector<int> axisData(axisTensorInfo.GetNumElements());
2857 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2858 const unsigned int splitDim = ComputeWrappedIndex(axisData[0], inputTensorInfo.GetNumDimensions());
2859
Derek Lambertif0176992020-04-28 13:37:49 +01002860 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01002861 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01002862 unsigned int numSplits{0};
2863
2864 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01002865 {
2866 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01002867 }
2868 else
2869 {
Ryan OShea86704732020-05-26 11:41:04 +01002870 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01002871 }
2872
2873     if (numSplits <= 0)
2874 {
2875 throw ParseException("SplitV has invalid number of splits");
2876 }
2877
Jan Eilersc0761e92020-06-29 16:48:44 +01002878 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01002879 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01002880 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01002881
Jan Eilersc0761e92020-06-29 16:48:44 +01002882 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01002883 int numInferred{0};
2884 unsigned int inferIdx{0};
2885 int splitSum{0};
2886 for (auto split : splitsData)
2887 {
2888 if (split < 0)
2889 {
2890 numInferred++;
2891 inferIdx = idx;
2892 }
2893 else
2894 {
2895 splitSum += split;
2896 }
2897 idx++;
2898 }
2899     // Validate the split sizes: at most one entry may be negative, in which case it is inferred
2900 if (numInferred == 0)
2901 {
2902 if (splitSum != numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
2903 {
2904 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
2905 }
2906 }
2907 else if (numInferred == 1)
2908 {
2909 splitsData[inferIdx] = numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
2910 }
2911 else
2912 {
2913 throw ParseException("Cannot infer split size for more than one split");
2914 }
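    // For example, split sizes [2, -1, 3] on a dimension of size 10 resolve to [2, 5, 3].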
2915
Derek Lambertif0176992020-04-28 13:37:49 +01002916     // Output size validation
2917 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2918 CHECK_VALID_SIZE(outputs.size(), numSplits);
2919
2920     // Set up the Arm NN splitter descriptor
2921 SplitterDescriptor splitDesc(numSplits, inputDimSize);
2922 unsigned int accumSplit = 0;
2923 for (unsigned int j = 0; j < numSplits; ++j)
2924 {
2925 unsigned int splitSize = numeric_cast<unsigned int>(splitsData[j]);
2926
2927 // Set the size of the views.
2928 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
2929 {
2930 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
2931 if (dimIdx == splitDim)
2932 {
2933 dimSize = splitSize;
2934 }
2935 splitDesc.SetViewSize(j, dimIdx, dimSize);
2936 }
2937
2938 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
2939 accumSplit += splitSize;
2940 }
2941
Ryan OShea86704732020-05-26 11:41:04 +01002942 auto layerName = boost::str(boost::format("SplitV:%1%:%2%") % subgraphIndex % operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01002943 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002944 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01002945
2946 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2947 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2948
2949 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2950 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002951 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01002952 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
2953 }
2954
2955 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2956 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2957}
2958
Sadik Armagan58f39192018-09-17 14:14:39 +01002959armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2960 unsigned int outputSlot,
2961 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002962{
2963 ActivationDescriptor activationDesc;
2964 std::string layerName = prevLayer->GetName();
2965
2966 switch(activationType)
2967 {
2968 case tflite::ActivationFunctionType_NONE:
2969 {
2970 // this is a no-op: return previous layer
2971 return prevLayer;
2972 }
2973 case tflite::ActivationFunctionType_RELU:
2974 {
2975 activationDesc.m_Function = ActivationFunction::ReLu;
2976 layerName += ":RELU";
2977 break;
2978 }
2979 case tflite::ActivationFunctionType_RELU6:
2980 {
2981 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2982 activationDesc.m_A = 6.0f;
2983 activationDesc.m_B = 0.0f;
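            // BoundedReLu clamps the output to [m_B, m_A] = [0, 6], matching the TFLite RELU6 semantics.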
2984 layerName += ":RELU6";
2985 break;
2986 }
2987 case tflite::ActivationFunctionType_TANH:
2988 {
2989 activationDesc.m_Function = ActivationFunction::TanH;
2990 activationDesc.m_A = 1.0f;
2991 activationDesc.m_B = 1.0f;
2992 layerName += ":TANH";
2993 break;
2994 }
2995
2996         // These are listed here only as a reminder of the other activation types we could support
2997 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2998 case tflite::ActivationFunctionType_SIGN_BIT:
2999 default:
3000 {
3001 throw ParseException(
3002 boost::str(
3003                     boost::format("TfLite parser doesn't support fused activation: "
3004 "%1%/%2% %3% ") %
3005 activationType %
3006 tflite::EnumNameActivationFunctionType(activationType) %
3007 CHECK_LOCATION().AsString()));
3008
3009 }
3010 }
3011
3012 IConnectableLayer* activationLayer =
3013 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
3014
3015 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
3016 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
3017 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
3018 return activationLayer;
3019}
3020
3021TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
3022{
3023 if (fileName == nullptr)
3024 {
3025 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
3026 CHECK_LOCATION().AsString()));
3027 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01003028 std::error_code errorCode;
3029 fs::path pathToFile(fileName);
3030 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01003031 {
Derek Lambertic9e52792020-03-11 11:42:26 +00003032 std::string locationString = CHECK_LOCATION().AsString();
3033 std::string msg = boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
3034 fileName %
3035 errorCode %
3036 locationString);
3037 throw FileNotFoundException(msg);
telsoa01c577f2c2018-08-31 09:22:23 +01003038 }
3039 std::ifstream file(fileName, std::ios::binary);
3040 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3041 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3042 fileContent.size());
3043}
3044
3045TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
3046{
3047 if (binaryContent == nullptr)
3048 {
3049 throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
3050 CHECK_LOCATION().AsString()));
3051 }
3052 flatbuffers::Verifier verifier(binaryContent, len);
3053 if (verifier.VerifyBuffer<tflite::Model>() == false)
3054 {
3055 throw ParseException(
3056             boost::str(boost::format("Buffer doesn't conform to the expected TensorFlow Lite "
3057 "flatbuffers format. size:%1% %2%") %
3058 len %
3059 CHECK_LOCATION().AsString()));
3060 }
3061 return tflite::UnPackModel(binaryContent);
3062}
3063
3064TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
3065 size_t subgraphIndex,
3066 size_t operatorIndex)
3067{
3068 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3069
Derek Lambertiff05cc52019-04-26 13:05:17 +01003070 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3071 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003072
3073 size_t inputCount = operatorPtr->inputs.size();
3074 TensorRawPtrVector result(inputCount);
3075 for (size_t i=0; i<inputCount; ++i)
3076 {
3077 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003078 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003079 }
3080 return result;
3081}
3082
3083TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
3084 size_t subgraphIndex,
3085 size_t operatorIndex)
3086{
3087 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3088
Derek Lambertiff05cc52019-04-26 13:05:17 +01003089 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3090 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003091
3092 size_t outputCount = operatorPtr->outputs.size();
3093 TensorRawPtrVector result(outputCount);
3094 for (size_t i=0; i<outputCount; ++i)
3095 {
3096 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
3097 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003098 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003099 }
3100 return result;
3101}
3102
3103TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
3104 size_t subgraphIndex)
3105{
3106 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003107 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003108
Derek Lambertiff05cc52019-04-26 13:05:17 +01003109 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003110 TensorIdRawPtrVector result(inputCount);
3111 for (size_t i=0; i<inputCount; ++i)
3112 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003113 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01003114 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003115 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003116 }
3117 return result;
3118}
3119
3120TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
3121 size_t subgraphIndex)
3122{
3123 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003124 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003125
Derek Lambertiff05cc52019-04-26 13:05:17 +01003126 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003127 TensorIdRawPtrVector result(outputCount);
3128 for (size_t i=0; i<outputCount; ++i)
3129 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003130 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3131 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003132 }
3133 return result;
3134}
3135
3136std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
3137 size_t subgraphIndex,
3138 size_t operatorIndex)
3139{
3140 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003141 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3142 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003143 return operatorPtr->inputs;
3144}
3145
3146std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
3147 size_t subgraphIndex,
3148 size_t operatorIndex)
3149{
3150 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003151 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3152 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003153 return operatorPtr->outputs;
3154}
3155
3156void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
3157 size_t operatorIndex,
3158 IConnectableLayer* layer,
3159 const std::vector<unsigned int>& tensorIndexes)
3160{
3161 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003162 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003163 if (tensorIndexes.size() != layer->GetNumInputSlots())
3164 {
3165 throw ParseException(
3166 boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%)"
3167 " for subgraph:%3% operator index:%4% %5%") %
3168 tensorIndexes.size() %
3169 layer->GetNumInputSlots() %
3170 subgraphIndex %
3171 operatorIndex %
3172 CHECK_LOCATION().AsString()));
3173 }
3174
3175 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
3176 {
3177 unsigned int tensorIndex = tensorIndexes[slotIndex];
3178 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
3179 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
3180 }
3181}
3182
3183void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
3184 size_t operatorIndex,
3185 IConnectableLayer* layer,
3186 const std::vector<unsigned int>& tensorIndexes)
3187{
3188 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003189 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003190 if (tensorIndexes.size() != layer->GetNumOutputSlots())
3191 {
3192 throw ParseException(
3193 boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%)"
3194 " for subgraph:%3% operator index:%4% %5%") %
3195 tensorIndexes.size() %
3196 layer->GetNumOutputSlots() %
3197 subgraphIndex %
3198 operatorIndex %
3199 CHECK_LOCATION().AsString()));
3200 }
3201
3202 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
3203 {
3204 unsigned int tensorIndex = tensorIndexes[slotIndex];
3205 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
3206 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3207 }
3208}
3209
3210void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
3211{
3212 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3213
3214 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
3215 for (auto const & tensorIdAndPtr : inputs)
3216 {
3217 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3218 IConnectableLayer* layer =
3219 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3220
3221 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
3222 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3223
3224 RegisterOutputSlots(subgraphIndex,
3225 VIRTUAL_OPERATOR_ID,
3226 layer,
3227 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3228 }
3229}
3230
3231void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
3232{
3233 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3234
3235 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
3236 for (auto const & tensorIdAndPtr : outputs)
3237 {
3238 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3239 IConnectableLayer* layer =
3240 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3241
3242 RegisterInputSlots(subgraphIndex,
3243 VIRTUAL_OPERATOR_ID,
3244 layer,
3245 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3246 }
3247}
3248
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003249void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
3250{
3251 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3252
Derek Lambertiff05cc52019-04-26 13:05:17 +01003253 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003254 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
3255 {
3256 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
3257 {
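            // A tensor that has registered consumers but no producer in the graph must be a
            // constant backed by a buffer in the model, so it is materialised as a ConstantLayer.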
3258 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
3259 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
3260 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003261 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003262 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
3263 auto tensorAndData = CreateConstTensor(tensorPtr,
3264 tensorInfo,
3265 armnn::Optional<armnn::PermutationVector&>());
3266
3267 std::string layerName = boost::str(boost::format("Constant:%1%") % tensorPtr->name);
3268 IConnectableLayer *layer =
3269 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
3270
3271 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3272 RegisterOutputSlots(subgraphIndex,
3273 VIRTUAL_OPERATOR_ID,
3274 layer,
3275 { tensorIndex });
3276
3277 }
3278 }
3279 }
3280}
3281
telsoa01c577f2c2018-08-31 09:22:23 +01003282// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
3283TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
3284{
3285 CHECK_BUFFER(model, bufferIndex);
3286 return model->buffers[bufferIndex].get();
3287}
3288
Matteo Martincigh747ef822018-12-18 09:26:39 +00003289template<typename T>
3290std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
3291TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
3292 TfLiteParser::TensorRawPtr tensorPtr,
3293 armnn::TensorInfo& tensorInfo,
3294 armnn::Optional<armnn::PermutationVector&> permutationVector)
3295{
3296 auto constData = CreateConstTensorImpl<T>(bufferPtr,
3297 tensorPtr,
3298 tensorInfo,
3299 permutationVector);
3300 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
3301 return std::make_pair(constData.first, std::move(storage));
3302}
3303
telsoa01c577f2c2018-08-31 09:22:23 +01003304std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
3305TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00003306 armnn::TensorInfo& tensorInfo,
3307 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01003308{
3309 CHECK_TENSOR_PTR(tensorPtr);
3310 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3311 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3312
3313 switch (tensorInfo.GetDataType())
3314 {
3315 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003316 return CreateConstTensorAndStoreData<float>(bufferPtr,
3317 tensorPtr,
3318 tensorInfo,
3319 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00003320 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003321 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
3322 tensorPtr,
3323 tensorInfo,
3324 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00003325 case armnn::DataType::QSymmS8:
3326 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3327 tensorPtr,
3328 tensorInfo,
3329 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00003330 case armnn::DataType::QAsymmS8:
3331 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3332 tensorPtr,
3333 tensorInfo,
3334 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003335 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003336 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
3337 tensorPtr,
3338 tensorInfo,
3339 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003340 default:
3341 {
3342 std::stringstream errString;
3343 errString << "Unexpected datatype when creating const tensor: "
3344 << armnn::GetDataTypeName(tensorInfo.GetDataType())
3345 << " shape:" << tensorInfo.GetShape()
3346 << CHECK_LOCATION().AsString();
3347 throw ParseException(errString.str());
3348 }
3349 }
3350}
3351
3352BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
3353 const std::string& name) const
3354{
3355 CHECK_SUBGRAPH(m_Model, subgraphId);
3356 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3357 for (auto const & input : inputs)
3358 {
3359 if (input.second->name == name)
3360 {
3361 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3362 return std::make_pair(bindingId, ToTensorInfo(input.second));
3363 }
3364 }
3365
3366 std::stringstream bindings;
3367 for (auto const & input : inputs)
3368 {
3369 bindings << "'" << input.second->name << "' ";
3370 }
3371
3372 throw ParseException(
3373 boost::str(
3374 boost::format("No input binding found for subgraph:%1% and name:%2%. "
3375 "Possible inputs are: [%3%] %4%") %
3376 subgraphId %
3377 name %
3378 bindings.str() %
3379 CHECK_LOCATION().AsString()));
3380}
3381
3382BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
3383 const std::string& name) const
3384{
3385 CHECK_SUBGRAPH(m_Model, subgraphId);
3386 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003387 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01003388 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003389 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01003390 if (output.second->name == name)
3391 {
3392 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003393 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3394 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3395 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01003396 }
3397 }
3398
3399 std::stringstream bindings;
3400 for (auto const & output : outputs)
3401 {
3402 bindings << "'" << output.second->name << "' ";
3403 }
3404
3405 throw ParseException(
3406 boost::str(
3407 boost::format("No output binding found for subgraph:%1% and name:%2%. "
3408 "Possible outputs are: [%3%] %4%") %
3409 subgraphId %
3410 name %
3411 bindings.str() %
3412 CHECK_LOCATION().AsString()));
3413}
3414
3415size_t TfLiteParser::GetSubgraphCount() const
3416{
3417 return m_Model->subgraphs.size();
3418}
3419
3420std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
3421{
3422 CHECK_SUBGRAPH(m_Model, subgraphId);
3423 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3424 std::vector<std::string> result;
3425 result.reserve(inputs.size());
3426 for (auto const & input : inputs)
3427 {
3428 result.push_back(input.second->name);
3429 }
3430 return result;
3431}
3432
3433std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
3434{
3435 CHECK_SUBGRAPH(m_Model, subgraphId);
3436 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3437 std::vector<std::string> result;
3438 result.reserve(outputs.size());
3439 for (auto const & output : outputs)
3440 {
3441 result.push_back(output.second->name);
3442 }
3443 return result;
3444}
3445
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003446ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003447{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003448 return new TfLiteParser(options);
telsoa01c577f2c2018-08-31 09:22:23 +01003449}
3450
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003451ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003452{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003453 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
telsoa01c577f2c2018-08-31 09:22:23 +01003454}
3455
3456void ITfLiteParser::Destroy(ITfLiteParser* parser)
3457{
3458 delete parser;
3459}
3460
3461TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
3462: m_FloatData(std::move(data))
3463, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003464, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003465, m_Int32Data(nullptr)
3466{
3467}
3468
3469TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
3470: m_FloatData(nullptr)
3471, m_Uint8Data(std::move(data))
Keith Davisd305e1a2020-01-22 11:57:54 +00003472, m_Int8Data(nullptr)
3473, m_Int32Data(nullptr)
3474{
3475}
3476
3477TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
3478: m_FloatData(nullptr)
3479, m_Uint8Data(nullptr)
3480, m_Int8Data(std::move(data))
telsoa01c577f2c2018-08-31 09:22:23 +01003481, m_Int32Data(nullptr)
3482{
3483}
3484
3485TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
3486: m_FloatData(nullptr)
3487, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003488, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003489, m_Int32Data(std::move(data))
3490{
3491}
3492
3493} // armnnTfLiteParser