blob: 104a55e675cab4ed6620bd5c6b94602972d3caf6 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kellyc5789ca2020-07-06 19:24:15 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfLiteParser/Version.hpp"
9
Sadik Armagand109a4d2020-07-28 10:42:13 +010010#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +000011#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010012#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000013#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010014#include <armnn/Tensor.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000015#include <armnnUtils/TensorUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010016#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010017#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000018#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010019#include <armnn/utility/NumericCast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010020
21// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000022#include <armnnUtils/Permute.hpp>
Rob Hughes9542f902021-07-14 09:48:54 +010023#include <armnnUtils/Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000024
Sadik Armagan479045b2018-10-01 11:51:37 +010025#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026#include <VerificationHelpers.hpp>
27
28// The generated code based on the Tf Lite schema:
29#include <schema_generated.h>
30
Matteo Martincighe011d202019-11-28 11:35:47 +000031#include <flatbuffers/flexbuffers.h>
32
James Ward58dec6b2020-09-11 17:32:44 +010033#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010034
Jim Flynnfca233e2021-09-23 12:16:53 +010035#include <tensorflow/lite/version.h>
36
telsoa01c577f2c2018-08-31 09:22:23 +010037#include <algorithm>
Matthew Sloyanac001ee2021-02-03 10:43:04 +000038#include <fstream>
39#include <iostream>
telsoa01c577f2c2018-08-31 09:22:23 +010040#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010041#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000042#include <sstream>
43
// Throws armnn::ParseException with the streamed message `msg`, appending the
// call site ("file:line") captured by CHECK_LOCATION() at the point of use.
#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
          { \
            throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
               << ": " \
               << CHECK_LOCATION().AsString()).str()); \
          }
telsoa01c577f2c2018-08-31 09:22:23 +010050
51using namespace armnn;
52using armnn::CheckLocation;
53namespace armnnTfLiteParser
54{
Kevin May7d96b162021-02-03 17:38:41 +000055
56ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
57 pTfLiteParserImpl(new TfLiteParserImpl(options)) {}
58
// Defaulted out-of-line, where TfLiteParserImpl is a complete type;
// presumably pTfLiteParserImpl is a smart pointer whose deleter needs the
// complete type here — TODO confirm against the header.
ITfLiteParser::~ITfLiteParser() = default;
60
61ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
62{
63 return new ITfLiteParser(options);
64}
65
66ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
67{
68 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
69}
70
// Deletes a parser obtained from CreateRaw(); also used as the custom deleter
// bound into ITfLiteParserPtr by Create().
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
75
76armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
77{
78 return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
79}
80
81armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
82{
83 return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
84}
85
86BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
87 const std::string& name) const
88{
89 return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
90}
91
92BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
93 const std::string& name) const
94{
95 return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
96}
97
98size_t ITfLiteParser::GetSubgraphCount() const
99{
100 return pTfLiteParserImpl->GetSubgraphCount();
101}
102
103std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
104{
105 return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
106}
107
108std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
109{
110 return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
111}
112
telsoa01c577f2c2018-08-31 09:22:23 +0100113namespace
114{
jimfly01c25411c2018-11-14 17:47:22 +0000115
// Sentinel operator index used where no real TfLite operator exists (e.g. for
// graph inputs/outputs). constexpr (rather than const) so it is a genuine
// compile-time constant usable in constant expressions.
constexpr uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
117
Kevin May7d96b162021-02-03 17:38:41 +0000118void CheckSubgraph(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100119 size_t subgraphIndex,
120 const CheckLocation & location)
121{
122 if (model.get() == nullptr)
123 {
124 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100125 fmt::format("{} was called with invalid (null) model. "
126 "Possible reason is that the model is not yet loaded and Unpack(ed). "
127 "subgraph:{} at {}",
128 location.m_Function,
129 subgraphIndex,
130 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100131 }
132 else if (subgraphIndex >= model->subgraphs.size())
133 {
134 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100135 fmt::format("{} was called with an invalid subgraph index. "
136 "subgraph:{} at {}",
137 location.m_Function,
138 subgraphIndex,
139 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100140 }
141}
142
// Validates MODEL/SUBGRAPH_INDEX, capturing the caller's source location.
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
145
Kevin May7d96b162021-02-03 17:38:41 +0000146void CheckModel(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100147 size_t subgraphIndex,
148 size_t operatorIndex,
149 const CheckLocation & location)
150{
151 if (model.get() == nullptr)
152 {
153 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100154 fmt::format("{} was called with invalid (null) model. "
155 "Possible reason is that the model is not yet loaded and Unpack(ed). "
156 "subgraph:{} operator:{} at {}",
157 location.m_Function,
158 subgraphIndex,
159 operatorIndex,
160 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100161 }
162 else if (subgraphIndex >= model->subgraphs.size())
163 {
164 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100165 fmt::format("{} was called with an invalid subgraph index. "
166 "subgraph:{} operator:{} at {}",
167 location.m_Function,
168 subgraphIndex,
169 operatorIndex,
170 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100171 }
172 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
173 operatorIndex != VIRTUAL_OPERATOR_ID)
174 {
175 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100176 fmt::format("{} was called with an invalid operator index. "
177 "subgraph:{} operator:{} at {}",
178 location.m_Function,
179 subgraphIndex,
180 operatorIndex,
181 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100182 }
183}
184
// Validates MODEL/SUBGRAPH_INDEX/OPERATOR_INDEX, capturing the caller's
// source location.
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
187
Kevin May7d96b162021-02-03 17:38:41 +0000188void CheckTensor(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100189 size_t subgraphIndex,
190 size_t tensorIndex,
191 const CheckLocation & location)
192{
193 // not checking model, because I assume CHECK_MODEL already run
194 // and checked that. An assert would do.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100195 ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
telsoa01c577f2c2018-08-31 09:22:23 +0100196
197 // also subgraph index should be checked by CHECK_MODEL so
198 // I only add an assert here
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100199 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
telsoa01c577f2c2018-08-31 09:22:23 +0100200
201 // the tensor index is the only one to check here
202 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
203 {
204 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100205 fmt::format("{} was called with an invalid tensor index. "
206 "subgraph:{} tensor:{} at {}",
207 location.m_Function,
208 subgraphIndex,
209 tensorIndex,
210 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100211 }
212}
213
// Validates MODEL/SUBGRAPH_INDEX/TENSOR_INDEX, capturing the caller's
// source location.
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
216
Kevin May7d96b162021-02-03 17:38:41 +0000217void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100218 const CheckLocation & location)
219{
220 if (rawPtr == nullptr)
221 {
222 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100223 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100224 }
225}
226
// Validates TENSOR_PTR is non-null, capturing the caller's source location.
#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
229
Kevin May7d96b162021-02-03 17:38:41 +0000230void CheckBuffer(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100231 size_t bufferIndex,
232 const CheckLocation & location)
233{
234 if (model.get() == nullptr)
235 {
236 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100237 fmt::format("{} was called with invalid (null) model. "
238 "Possible reason is that the model is not yet loaded and Unpack(ed). "
239 "buffer:{} at {}",
240 location.m_Function,
241 bufferIndex,
242 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100243 }
244 else if (bufferIndex >= model->buffers.size())
245 {
246 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100247 fmt::format("{} was called with an invalid buffer index. "
248 "buffer index:{} at {}",
249 location.m_Function,
250 bufferIndex,
251 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100252 }
253 else if (model->buffers[bufferIndex].get() == nullptr)
254 {
255 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100256 fmt::format("The buffer #{} is null. {}",
257 bufferIndex,
258 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100259 }
260}
261
// Validates MODEL/BUFFER_INDEX, capturing the caller's source location.
#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
264
Kevin May7d96b162021-02-03 17:38:41 +0000265void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100266 const armnn::TensorInfo & tensorInfo,
267 uint32_t bufferId,
268 const CheckLocation & location)
269{
270 if (bufferPtr == nullptr)
271 {
272 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100273 fmt::format("BufferPtr is null for buffer:{}. {}",
274 bufferId,
275 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100276 }
277 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
278 tensorInfo.GetNumBytes() > bufferPtr->data.size())
279 {
280 std::stringstream ss;
281 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
282 << "For tensor: " << tensorInfo.GetShape()
283 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
284 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
285 throw ParseException(ss.str());
286 }
287}
288
// Validates BUFFER_PTR can hold TENSOR_INFO's data, capturing the caller's
// source location.
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
291
292bool IsActivationSupported(tflite::ActivationFunctionType activationType)
293{
294 switch(activationType)
295 {
296 case tflite::ActivationFunctionType_NONE:
297 case tflite::ActivationFunctionType_RELU:
298 case tflite::ActivationFunctionType_RELU6:
299 case tflite::ActivationFunctionType_TANH:
300 {
301 return true;
302 }
303 default:
304 {
305 return false;
306 }
307 }
308}
309
// Throws ParseException when the operator's fused activation is not one the
// parser supports (see IsActivationSupported).
// NOTE(review): "suppport" typo below is part of the emitted runtime message;
// left untouched here since changing it alters runtime output.
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                fmt::format("TfLite parser doesn't suppport fused activation: " \
                            "{}/{} in {} subgraph:{} operator:{} at {}", \
                            OPTION->fused_activation_function, \
                            tflite::EnumNameActivationFunctionType(\
                                OPTION->fused_activation_function), \
                            __func__, \
                            SUBGRAPH_INDEX, \
                            OPERATOR_INDEX, \
                            CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
326
327
328std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
329{
330 std::vector<unsigned int> result;
331 result.reserve(in.size());
332 for (auto & i : in)
333 {
mathad01c21025d2021-04-26 10:09:37 +0100334 // If the location of the input data is -1 then the input should be ignored.
335 if (i == -1)
336 {
337 continue;
338 }
telsoa01c577f2c2018-08-31 09:22:23 +0100339 result.push_back(CHECKED_NON_NEGATIVE(i));
340 }
341 return result;
342}
343
344void CalcPadding(uint32_t inputSize,
345 uint32_t filterSize,
346 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100347 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100348 uint32_t& paddingFront,
349 uint32_t& paddingBack,
350 tflite::Padding padding)
351{
352 paddingFront = 0;
353 paddingBack = 0;
354 if (padding == tflite::Padding_SAME)
355 {
356 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100357 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
358 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100359 if (temp > inputSize)
360 {
361 paddingFront = (temp - inputSize) / 2;
362 paddingBack = (temp - inputSize) - paddingFront;
363 }
364 }
365}
366
Kevin May7d96b162021-02-03 17:38:41 +0000367armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Finn Williamsb49ed182021-06-29 15:50:08 +0100368 const std::vector<unsigned int>& shape,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100369 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100370{
371 armnn::DataType type;
372 CHECK_TENSOR_PTR(tensorPtr);
373
374 switch (tensorPtr->type)
375 {
376 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000377 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100378 break;
379 case tflite::TensorType_FLOAT32:
380 type = armnn::DataType::Float32;
381 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000382 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000383 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000384 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000385 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000386 type = armnn::DataType::QAsymmS8;
387 }
388 else
389 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000390 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000391 type = armnn::DataType::QSymmS8;
392 }
Finn Williamsed66d142019-12-06 09:55:55 +0000393 break;
394 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000395 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000396 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100397 case tflite::TensorType_INT32:
398 type = armnn::DataType::Signed32;
399 break;
Inki Daed4619e22020-09-10 15:33:54 +0900400 case tflite::TensorType_INT64:
401 type = armnn::DataType::Signed64;
402 break;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100403 case tflite::TensorType_BOOL:
404 type = armnn::DataType::Boolean;
405 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100406 default:
407 {
408 CheckLocation location = CHECK_LOCATION();
409 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100410 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
411 tensorPtr->type,
412 tflite::EnumNameTensorType(tensorPtr->type),
413 tensorPtr->name,
414 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100415 }
416 }
Finn Williamsb49ed182021-06-29 15:50:08 +0100417 TensorShape tensorShape;
418
419 std::vector<unsigned int> safeShape = shape;
420 if (shape.size() == 0)
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100421 {
422 safeShape.push_back(1);
Finn Williamsb49ed182021-06-29 15:50:08 +0100423 }
424
425 if (!outputTensor)
426 {
427 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()), safeShape.data());
428 }
429 else
430 {
Rob Hughesd812a312021-08-06 13:10:53 +0100431 size_t shapeSignatureSize = tensorPtr->shape_signature.size();
Finn Williamsb49ed182021-06-29 15:50:08 +0100432
433 // If a shape signature exists we will use that to infer dynamic tensors
434 if (shapeSignatureSize != 0)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100435 {
Finn Williamsb49ed182021-06-29 15:50:08 +0100436 // If the shape is incompatible with the shape signature override the shape
437 if (shapeSignatureSize != shape.size())
438 {
439 safeShape = {};
440
441 for (unsigned int i = 0; i < shapeSignatureSize; ++i)
442 {
443 unsigned int dim = tensorPtr->shape_signature[i] > -1 ?
444 static_cast<unsigned int>(tensorPtr->shape_signature[i]) : 0;
445 safeShape.push_back(dim);
446 }
447 }
448
Rob Hughesd812a312021-08-06 13:10:53 +0100449 std::unique_ptr<bool[]> dimMask = std::make_unique<bool[]>(tensorPtr->shape_signature.size());
Finn Williamsb49ed182021-06-29 15:50:08 +0100450 for (unsigned int i = 0; i < tensorPtr->shape_signature.size(); ++i)
451 {
452 dimMask[i] = tensorPtr->shape_signature[i] == -1 ? false : true;
453 }
Rob Hughesd812a312021-08-06 13:10:53 +0100454 tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask.get());
Finn Williamsb49ed182021-06-29 15:50:08 +0100455 }
456 // If there is no shape signature treat the tensor as dynamic if the shape has a size of zero
457 else if (shape.size() == 0)
458 {
459 tensorShape = TensorShape(1, false);
460 }
461 else
462 {
463 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(shape.size()), shape.data());
Sadik Armagand109a4d2020-07-28 10:42:13 +0100464 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100465 }
466
Keith Davisd305e1a2020-01-22 11:57:54 +0000467 float quantizationScale = 0.0f;
468 int32_t quantizationOffset = 0;
469
470 if (tensorPtr->quantization.get())
471 {
472 if (tensorPtr->quantization->scale.size() <= 1)
473 {
474 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
475 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
476
477 if (tensorPtr->quantization->scale.size() == 1)
478 {
479 quantizationScale = tensorPtr->quantization->scale[0];
480 }
481 if (tensorPtr->quantization->zero_point.size() == 1)
482 {
483 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000484 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100485 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000486 }
487
Sadik Armagand109a4d2020-07-28 10:42:13 +0100488 armnn::TensorInfo result(tensorShape,
489 type,
490 quantizationScale,
491 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000492 return result;
493 }
494 else
495 {
496 std::vector<float> quantizationScales;
497 std::vector<int32_t> quantizationOffsets;
498
499 // Scale
500 std::copy(tensorPtr->quantization->scale.begin(),
501 tensorPtr->quantization->scale.end(),
502 std::back_inserter(quantizationScales));
503
Keith Davis0c2eeac2020-02-11 16:51:50 +0000504 // QSymmS8 Per-axis
Sadik Armagand109a4d2020-07-28 10:42:13 +0100505 armnn::TensorInfo result(tensorShape,
506 type,
507 quantizationScales,
Jan Eilers7612bd62021-04-06 17:29:03 +0100508 armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
Keith Davisd305e1a2020-01-22 11:57:54 +0000509 return result;
510 }
511 }
512 else
513 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100514 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000515 type,
516 quantizationScale,
517 quantizationOffset);
518 return result;
519 }
telsoa01c577f2c2018-08-31 09:22:23 +0100520}
521
Jan Eilers7612bd62021-04-06 17:29:03 +0100522armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr)
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000523{
524 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100525 return ToTensorInfo(tensorPtr, dimensions);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000526}
527
Kevin May7d96b162021-02-03 17:38:41 +0000528armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100529 const bool outputTensor)
530{
531 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100532 return ToTensorInfo(tensorPtr, dimensions, outputTensor);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100533}
534
telsoa01c577f2c2018-08-31 09:22:23 +0100535template<typename T>
536std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
Kevin May7d96b162021-02-03 17:38:41 +0000537CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
538 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000539 armnn::TensorInfo& tensorInfo,
540 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100541{
Jan Eilers8eb25602020-03-09 12:13:48 +0000542 IgnoreUnused(tensorPtr);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100543 ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
544 ARMNN_ASSERT_MSG(bufferPtr != nullptr,
James Ward58dec6b2020-09-11 17:32:44 +0100545 fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
telsoa01c577f2c2018-08-31 09:22:23 +0100546
547 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000548
549 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
550 {
551 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000552 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
553 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000554 }
555 else
556 {
557 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
558 }
559
Matthew Sloyan81beae32021-07-13 19:46:11 +0100560 // Make sure isConstant flag is set.
561 tensorInfo.SetConstant();
562
telsoa01c577f2c2018-08-31 09:22:23 +0100563 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
564}
565
telsoa01c577f2c2018-08-31 09:22:23 +0100566armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
567{
568 // generate the binding id by shifting the tensor id by 8 bit
569 // and add the subgraph id, which allows 256 subgraphs
570 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
571}
572
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000573bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
574{
575 const unsigned int actualSize = actual.GetNumDimensions();
576 if (actualSize != expected.size())
577 {
578 return false;
579 }
580
581 for (unsigned int i = 0u; i < actualSize; i++)
582 {
583 if (expected[i] < 0 ||
584 actual[i] != static_cast<unsigned int>(expected[i]))
585 {
586 return false;
587 }
588 }
589
590 return true;
591}
592
James Conroy05102392020-06-24 15:39:55 +0100593void CheckMatchingQuantization(const TensorInfo& first,
594 const TensorInfo& second,
595 const std::string& descName,
596 std::string const& firstName,
597 std::string const& secondName)
598{
599 if (!first.IsQuantized() ||
600 !second.IsQuantized())
601 {
602 // Not a quantized type, ignore the validation
603 return;
604 }
605
606 DataType firstDataType = first.GetDataType();
607 DataType secondDataType = second.GetDataType();
608
609 if (firstDataType != secondDataType)
610 {
611 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
612 " must be of the same quantized type, " +
613 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
614 secondName + " is " + GetDataTypeName(secondDataType));
615 }
616
617 if (!first.IsTypeSpaceMatch(second))
618 {
619 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
620 " must have the same quantization space, " +
621 firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
622 " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
623 secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
624 " and scale " + std::to_string(second.GetQuantizationScale()));
625 }
626}
627
telsoa01c577f2c2018-08-31 09:22:23 +0100628} // <anonymous>
629
// Constructs the parser implementation.
// m_ParserFunctions is sized to cover every builtin operator code and is
// pre-filled with ParseUnsupportedOperator, so any opcode that is not
// explicitly registered below yields a controlled "unsupported operator"
// error instead of an out-of-range dispatch.
TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
    m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
    m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
    m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
        = &TfLiteParserImpl::ParseLocalResponseNormalization;
    m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
    m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
}
706
Kevin May7d96b162021-02-03 17:38:41 +0000707void TfLiteParserImpl::ResetParser()
telsoa01c577f2c2018-08-31 09:22:23 +0100708{
709 m_Network = armnn::INetworkPtr(nullptr, nullptr);
710 m_Model = nullptr;
711 m_SubgraphConnections.clear();
712}
713
Kevin May7d96b162021-02-03 17:38:41 +0000714INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
telsoa01c577f2c2018-08-31 09:22:23 +0100715{
716 ResetParser();
717 m_Model = LoadModelFromFile(graphFile);
718 return CreateNetworkFromModel();
719}
720
Kevin May7d96b162021-02-03 17:38:41 +0000721INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
telsoa01c577f2c2018-08-31 09:22:23 +0100722{
723 ResetParser();
724 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
725 return CreateNetworkFromModel();
726}
727
Finn Williamsb49ed182021-06-29 15:50:08 +0100728
729armnn::INetworkPtr TfLiteParserImpl::LoadModel(std::unique_ptr<tflite::ModelT> model)
730{
731 ResetParser();
732 m_Model = std::move(model);
733
734 return CreateNetworkFromModel();
735}
736
// Translates the loaded tflite::ModelT (m_Model) into an armnn::INetwork.
// Walks every operator of the (single) subgraph, dispatching to the parser
// function registered for its builtin code, then wires up all recorded
// producer/consumer slot connections. Throws ParseException on any model
// that cannot be translated. Ownership of the built network is moved to the
// caller, leaving m_Network empty.
INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
{

    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
    // Propagate the parser option into the network so shape inference can
    // validate inferred shapes against those declared in the model.
    if (m_Options && m_Options.value().m_InferAndValidate)
    {
        BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                  {
                                                      { "InferAndValidate", true }
                                                  });

        networkOptions.push_back(shapeInferenceMethodOption);
    }

    m_Network = INetwork::Create(networkOptions);
    ARMNN_ASSERT(m_Model.get() != nullptr);

    // Multiple subgraphs are not supported; fail early with a clear message.
    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
                        m_Model->subgraphs.size(),
                        CHECK_LOCATION().AsString()));
    }

    // Both indices are declared outside the try block so the catch handler can
    // report exactly which operator failed.
    size_t subgraphIndex = 0;
    size_t operatorIndex = 0;
    try
    {
        for (SubgraphPtr const& subgraph : m_Model->subgraphs)
        {
            // One TensorSlots entry per tensor; producers/consumers are
            // recorded by the Parse* functions and connected after the loop.
            m_SubgraphConnections.emplace_back(subgraph->tensors.size());
            for (OperatorPtr const& op : subgraph->operators)
            {
                auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];

// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
                // Newer schemas may carry the code in either field; the valid
                // one is the larger of the two values.
                auto builtinCode = std::max(opCodePtr->builtin_code,
                        static_cast<tflite::BuiltinOperator>(opCodePtr->deprecated_builtin_code));
#else
                auto builtinCode = opCodePtr->builtin_code;
#endif

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
                                                     "subgraph:{} operator idx:{}. {}",
                                                     builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
                                                     operatorIndex, CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                // (unregistered codes resolve to ParseUnsupportedOperator via
                // the constructor's table initialisation)
                auto& parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
                ++operatorIndex;
            }

            SetupInputLayers(subgraphIndex);
            SetupOutputLayers(subgraphIndex);
            SetupConstantLayers(subgraphIndex);

            ++subgraphIndex;
            operatorIndex = 0;
        }
    }
    catch (const ParseException& e)
    {
        // Re-wrap the exception so the message pinpoints the failing operator.
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // Tensors without a registered producer (e.g. constants handled
            // elsewhere) are skipped.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}
835
Kevin May7d96b162021-02-03 17:38:41 +0000836void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
837 size_t tensorIndex,
838 armnn::IOutputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +0100839{
840 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100841 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
842 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100843
844 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
845
846 // assuming there is only one producer for that tensor
847 if (tensorSlots.outputSlot != nullptr)
848 {
James Ward58dec6b2020-09-11 17:32:44 +0100849 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
850 "subgraph:{} tensor:{} {}",
851 subgraphIndex,
852 tensorIndex,
853 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100854 }
855
856 tensorSlots.outputSlot = slot;
857}
858
Kevin May7d96b162021-02-03 17:38:41 +0000859void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
860 size_t tensorIndex,
861 armnn::IInputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +0100862{
863 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100864 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
865 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100866
Finn Williamsd4fa5452021-03-01 12:31:41 +0000867 TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +0100868 tensorSlots.inputSlots.push_back(slot);
869}
870
Kevin May7d96b162021-02-03 17:38:41 +0000871void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100872{
873 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
874
875 // NOTE: By default we presume the custom operator is not supported
Kevin May7d96b162021-02-03 17:38:41 +0000876 auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100877
878 // Identify custom code defined for custom operator
879 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
880 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
881
882 // Find parser function that correspondes to custom code (if any)
883 auto iterator = m_CustomParserFunctions.find(customCode);
884 if (iterator != m_CustomParserFunctions.end())
885 {
886 customParserFunction = iterator->second;
887 }
888
889 // Run parser function
890 (this->*customParserFunction)(subgraphIndex, operatorIndex);
891}
892
// Handles any operator with no registered parser function.
// By default this throws; when the m_StandInLayerForUnsupported option is set
// it instead inserts a non-executable StandInLayer with matching input/output
// counts so the rest of the graph can still be constructed and inspected.
void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;

// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
    // Newer schemas may carry the code in either field; the valid one is the
    // larger of the two values.
    auto opcode = std::max(m_Model->operator_codes[opcodeIndex]->builtin_code,
                           static_cast<tflite::BuiltinOperator>(m_Model->operator_codes[opcodeIndex]->deprecated_builtin_code));
#else
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
#endif

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
            fmt::format("Operator not supported. "
                        "subgraph:{} operator:{} "
                        "opcode_index:{} opcode:{} / {} {}",
                        subgraphIndex,
                        operatorIndex,
                        opcodeIndex,
                        opcode,
                        tflite::EnumNameBuiltinOperator(opcode),
                        CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());

    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        // 'true' allows output shapes that are not fully specified in the model.
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
    }

    // Record slot connections; the actual wiring happens after all layers exist.
    auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}
948
mathad01b392e982021-04-07 12:07:30 +0100949void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
950{
951 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
952
953 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
954 CHECK_VALID_SIZE(inputs.size(), 1);
955 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
956 CHECK_VALID_SIZE(outputs.size(), 1);
957
958 auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
959
960 IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
961 ARMNN_ASSERT(layer != nullptr);
962
963 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
964 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
965
966 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
967 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
968
969 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
970 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
971}
972
// Translates a TfLite CONV_2D operator into an armnn Convolution2dLayer.
// Inputs: [0] activation (NHWC), [1] constant weights, [2] optional constant
// bias. A fused activation (if any) is appended as a separate layer.
void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;                               // enabled below if a bias input exists
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);                    // weights mandatory, bias optional

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Convert the TfLite SAME/VALID padding enum into explicit pad values,
    // taking stride and dilation into account (one call per spatial axis).
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // Weights already match armnn's expected layout, so no permutation needed.
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData,
                                                 Optional<ConstTensor>(biasTensorAndData),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    // 'true' allows output shapes that are not fully specified in the model.
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Insert the fused activation (if any); 'layer' then refers to the
    // activation so the operator's output tensor connects to it.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1051
Matthew Sloyaneb5f8102021-10-05 17:31:42 +01001052void TfLiteParserImpl::ParseConv3D(size_t subgraphIndex, size_t operatorIndex)
1053{
1054 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1055
1056 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1057 const auto* options = operatorPtr->builtin_options.AsConv3DOptions();
1058
1059 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1060
1061 Convolution3dDescriptor desc;
1062 desc.m_BiasEnabled = false;
1063 desc.m_DataLayout = armnn::DataLayout::NDHWC;
1064 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1065 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1066 desc.m_StrideZ = CHECKED_NON_NEGATIVE(options->stride_d);
1067 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1068 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
1069 desc.m_DilationZ = CHECKED_NON_NEGATIVE(options->dilation_d_factor);
1070
1071 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1072 CHECK_VALID_SIZE(inputs.size(), 2, 3);
1073
1074 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1075 CHECK_VALID_SIZE(outputs.size(), 1);
1076
1077 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1078 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1079
1080 // Assuming input is NDHWC
1081 unsigned int inputDepth = inputTensorInfo.GetShape()[1];
1082 unsigned int inputHeight = inputTensorInfo.GetShape()[2];
1083 unsigned int inputWidth = inputTensorInfo.GetShape()[3];
1084
1085 // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
1086 unsigned int filterDepth = filterTensorInfo.GetShape()[0];
1087 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1088 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1089
1090 CalcPadding(inputDepth, filterDepth, desc.m_StrideZ,
1091 desc.m_DilationY, desc.m_PadFront, desc.m_PadBack, options->padding);
1092 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1093 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1094 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1095 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
1096
1097 auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
1098
1099 armnn::IConnectableLayer* layer = nullptr;
1100 auto layerName = fmt::format("Conv3D:{}:{}", subgraphIndex, operatorIndex);
1101
1102 if (inputs.size() == 3)
1103 {
1104 desc.m_BiasEnabled = true;
1105 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
1106 auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
1107 layer = m_Network->AddConvolution3dLayer(desc,
1108 filterTensorAndData,
1109 Optional<ConstTensor>(biasTensorAndData),
1110 layerName.c_str());
1111 }
1112 else
1113 {
1114 layer = m_Network->AddConvolution3dLayer(desc,
1115 filterTensorAndData,
1116 EmptyOptional(),
1117 layerName.c_str());
1118 }
1119
1120 ARMNN_ASSERT(layer != nullptr);
1121
1122 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1123 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1124
1125 // Register the input connection slots for the layer, connections are made after all layers have been created
1126 // only the tensors for the inputs are relevant, exclude the const tensors
1127 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1128 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1129
1130 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1131 // Register the output connection slots for the layer, connections are made after all layers have been created
1132 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1133 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1134}
1135
// Translates a TfLite DEPTHWISE_CONV_2D operator into an armnn
// DepthwiseConvolution2dLayer. Inputs: [0] activation (NHWC), [1] constant
// weights in TfLite's [1, H, W, I * M] layout, [2] optional constant bias.
// A fused activation (if any) is appended as a separate layer.
void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;                               // enabled below if a bias input exists
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // The multiplier is only validated here; it is implicit in the weights shape.
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);                    // weights mandatory, bias optional
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Convert the TfLite SAME/VALID padding enum into explicit pad values,
    // taking stride and dilation into account (one call per spatial axis).
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // ArmNN uses the same filter tensor layout at TfLite [1, H, W, O] no need for any permutation
    auto filterTensor = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensor,
                                                          Optional<ConstTensor>(biasTensorAndData),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensor,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    // 'true' allows output shapes that are not fully specified in the model.
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Insert the fused activation (if any); 'layer' then refers to the
    // activation so the operator's output tensor connects to it.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1212
Kevin May7d96b162021-02-03 17:38:41 +00001213void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsed66d142019-12-06 09:55:55 +00001214{
1215 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1216
1217 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1218 CHECK_VALID_SIZE(inputs.size(), 1);
1219
1220 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1221 CHECK_VALID_SIZE(outputs.size(), 1);
1222
James Ward58dec6b2020-09-11 17:32:44 +01001223 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +00001224
1225 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001226 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +00001227
Sadik Armagand109a4d2020-07-28 10:42:13 +01001228 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Finn Williamsed66d142019-12-06 09:55:55 +00001229 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1230
1231 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1232 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1233
1234 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1235 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1236}
1237
Teresa Charlin3ab85482021-06-08 16:59:29 +01001238void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorIndex)
1239{
1240 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1241
1242 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1243 CHECK_VALID_SIZE(inputs.size(), 2);
1244
1245 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1246 CHECK_VALID_SIZE(outputs.size(), 1);
1247
1248 auto layerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
1249
1250 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1251 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1252
1253 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1254
1255 ReshapeDescriptor reshapeDesc;
Finn Williamsb49ed182021-06-29 15:50:08 +01001256
1257 if (outputTensorInfo.GetShape().AreAllDimensionsSpecified())
1258 {
1259 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1260 }
1261 else
1262 {
1263 int32_t axis = inputs[1]->shape[0];
1264
1265 int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
1266
1267 if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
1268 {
1269 throw ParseException("axis must be in range [0 - (inputDimSize + 1), inputDimSize] inclusive");
1270 }
1271
1272 if(axis < 0)
1273 {
1274 axis = inputDimSize + axis + 1;
1275 }
1276
Rob Hughesd812a312021-08-06 13:10:53 +01001277 std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
Finn Williamsb49ed182021-06-29 15:50:08 +01001278 unsigned int inputShapeIndex = 0;
1279 for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
1280 {
1281 if (i == static_cast<unsigned int>(axis))
1282 {
1283 shape[i] = 1;
1284 }
1285 else
1286 {
1287 shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
1288 ++inputShapeIndex;
1289 }
1290 }
1291
Rob Hughesd812a312021-08-06 13:10:53 +01001292 reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data());
Finn Williamsb49ed182021-06-29 15:50:08 +01001293 }
Teresa Charlin3ab85482021-06-08 16:59:29 +01001294
1295 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1296 ARMNN_ASSERT(layer != nullptr);
1297 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1298
1299 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1300 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1301
1302 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1303 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1304}
1305
Kevin May7d96b162021-02-03 17:38:41 +00001306void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
Keith Davis4cd29a02019-09-09 14:49:20 +01001307{
1308 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1309
1310 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001311 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001312
1313 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1314 CHECK_VALID_SIZE(outputs.size(), 1);
1315
James Ward58dec6b2020-09-11 17:32:44 +01001316 auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly08759e22020-03-02 11:41:31 +00001317 TransposeDescriptor desc;
Keith Davis4cd29a02019-09-09 14:49:20 +01001318
josh minorba424d22019-11-13 10:55:17 -06001319 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001320 {
1321 armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
1322 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001323 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1324 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001325 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
Mike Kelly08759e22020-03-02 11:41:31 +00001326 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
Kevin May85d92602019-09-27 17:21:06 +01001327
Mike Kelly08759e22020-03-02 11:41:31 +00001328 desc = TransposeDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001329 }
1330
James Conroy05102392020-06-24 15:39:55 +01001331 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001332 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001333 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Keith Davis4cd29a02019-09-09 14:49:20 +01001334
James Conroy05102392020-06-24 15:39:55 +01001335 IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001336 ARMNN_ASSERT(layer != nullptr);
Keith Davis4cd29a02019-09-09 14:49:20 +01001337 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1338
1339 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1340 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1341
1342 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1343 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1344}
1345
/// Parses a TFLite TRANSPOSE_CONV operator into an ArmNN TransposeConvolution2d layer.
/// Per the usage below: inputs[0] is the constant output-shape tensor, inputs[1] the
/// filter, inputs[2] the activation input, and inputs[3] (optional) the bias.
void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // A fourth input means a bias tensor is present; otherwise exactly three inputs.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    if (inputs.size() == 4)
    {
        desc.m_BiasEnabled = true;
    }
    else
    {
        CHECK_VALID_SIZE(inputs.size(), 3);
    }

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // If the output-shape tensor is present, copy its constant data into the
    // descriptor. Signed32 data is copied wholesale; QAsymmU8 data is widened
    // byte-by-byte into ints.
    if (inputs[0])
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
        std::vector<int> output_shape(tensorInfo.GetNumElements());
        if (tensorInfo.GetDataType() == DataType::Signed32)
        {
            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
        }
        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
        {
            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
            {
                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
            }
        }
        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : output_shape)
        {
            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        desc.m_OutputShapeEnabled = true;
    }
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Derive explicit padding from the TFLite padding mode (dilation fixed at 1).
    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);

    if (desc.m_BiasEnabled)
    {
        auto biasTensorInfo = ToTensorInfo(inputs[3]);
        auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo);
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData,
                                                          biasConstTensor,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1454
/// Parses a TFLite AVERAGE_POOL_2D operator; thin dispatcher to the shared
/// ParsePool helper with the Average pooling algorithm.
void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1459
Kevin May7d96b162021-02-03 17:38:41 +00001460void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001461{
1462 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1463
1464 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1465 CHECK_VALID_SIZE(inputs.size(), 3);
1466
1467 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1468 CHECK_VALID_SIZE(outputs.size(), 1);
1469
1470 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1471 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1472
1473 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1474 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1475
1476 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1477 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1478
1479 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1480 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1481
1482 size_t step = 2;
1483 std::vector<std::pair<unsigned int, unsigned int>> crops;
1484 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1485 {
1486 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1487 }
1488
1489 armnn::BatchToSpaceNdDescriptor desc;
1490 desc.m_BlockShape = blockShape;
1491 desc.m_Crops = crops;
1492 desc.m_DataLayout = armnn::DataLayout::NHWC;
1493
James Ward58dec6b2020-09-11 17:32:44 +01001494 auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001495
James Conroy05102392020-06-24 15:39:55 +01001496 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001497 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001498 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1499
1500 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1501 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001502 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1503
1504 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1505 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1506
1507 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1508 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1509}
1510
Kevin May7d96b162021-02-03 17:38:41 +00001511void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001512{
1513 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1514
1515 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1516 CHECK_VALID_SIZE(inputs.size(), 1);
1517
1518 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1519 CHECK_VALID_SIZE(outputs.size(), 1);
1520
1521 L2NormalizationDescriptor desc;
1522 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001523 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001524 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1525
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001526 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001527
Sadik Armagand109a4d2020-07-28 10:42:13 +01001528 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson28c94572019-07-18 10:47:03 +01001529 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1530
1531 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1532 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1533
1534 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1535 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1536}
1537
/// Parses a TFLite MAX_POOL_2D operator; thin dispatcher to the shared
/// ParsePool helper with the Max pooling algorithm.
void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1542
Kevin May7d96b162021-02-03 17:38:41 +00001543void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001544{
1545 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1546
1547 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1548 CHECK_VALID_SIZE(inputs.size(), 2);
1549
1550 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1551 CHECK_VALID_SIZE(outputs.size(), 1);
1552
James Ward58dec6b2020-09-11 17:32:44 +01001553 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001554
1555 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1556 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1557 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001558
Sadik Armagand109a4d2020-07-28 10:42:13 +01001559 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001560 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1561
1562 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1563 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001564 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1565
1566 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001567 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001568
1569 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1570 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1571}
1572
Kevin May7d96b162021-02-03 17:38:41 +00001573void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001574{
1575 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1576
1577 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1578 CHECK_VALID_SIZE(inputs.size(), 2);
1579
1580 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1581 CHECK_VALID_SIZE(outputs.size(), 1);
1582
James Ward58dec6b2020-09-11 17:32:44 +01001583 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001584
1585 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1586 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1587 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001588
Sadik Armagand109a4d2020-07-28 10:42:13 +01001589 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001590 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1591
1592 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1593 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001594 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1595
1596 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001597 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001598
1599 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1600 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1601}
1602
/// Shared handler for TFLite pooling operators (AVERAGE_POOL_2D / MAX_POOL_2D).
/// Builds a Pooling2d layer and appends any fused activation.
void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
                                 size_t operatorIndex,
                                 PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    // The layer name encodes which pooling variant this operator is.
    std::string layerName;

    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
            break;
        default:
            ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // Derive explicit padding from the TFLite padding mode (dilation fixed at 1).
    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // NOTE: AddFusedActivationLayer may replace 'layer' with the appended activation
    // layer, so the output slots below must be registered AFTER this call.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1674
/// Parses a TFLite SLICE operator into an ArmNN Slice layer.
/// Input 1 holds the constant begin indices; input 2 holds the sizes, where a
/// size of -1 means "take everything from begin to the end of that dimension".
void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    SliceDescriptor desc;

    // set begin tensor info for slice descriptor
    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    // set size tensor info for slice descriptor
    armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    // Sizes are read as signed values first so the -1 sentinel can be detected,
    // then validated and converted to unsigned extents.
    std::vector<int> signedSize(sizeTensorInfo.GetNumElements());
    ::memcpy(signedSize.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
    std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    for (unsigned int i = 0; i < signedSize.size(); ++i)
    {
        int signedValue = signedSize[i];

        // Each size must fit inside the remaining extent of its dimension; -1 is
        // the only allowed negative value.
        if (signedValue < -1 || signedValue > static_cast<int>(inputTensorInfo.GetShape()[i] - begin[i]))
        {
            throw ParseException(fmt::format("Invalid value for size {} size must be in range "
                                             "[-1, inputDimSize - begin] [-1, {}] inclusive {}",
                                             signedValue,
                                             inputTensorInfo.GetShape()[i] - begin[i],
                                             CHECK_LOCATION().AsString()));
        }

        if (signedValue == -1)
        {
            // -1 expands to the rest of the dimension from the begin index.
            size[i] = inputTensorInfo.GetShape()[i] - begin[i];
        }
        else
        {
            size[i] = static_cast<unsigned int>(signedValue);
        }
    }

    desc = SliceDescriptor(begin, size);

    auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1744
Kevin May7d96b162021-02-03 17:38:41 +00001745void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001746{
1747 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1748 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1749 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1750
1751 SoftmaxDescriptor desc;
1752 desc.m_Beta = options->beta;
1753
1754 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1755 CHECK_VALID_SIZE(inputs.size(), 1);
1756 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1757 CHECK_VALID_SIZE(outputs.size(), 1);
1758
James Ward58dec6b2020-09-11 17:32:44 +01001759 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001760 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1761
Sadik Armagand109a4d2020-07-28 10:42:13 +01001762 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
telsoa01c577f2c2018-08-31 09:22:23 +01001763 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1764
1765 // register the input connection slots for the layer, connections are made after all layers have been created
1766 // only the tensors for the inputs are relevant, exclude the const tensors
1767 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1768 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1769
1770 // register the output connection slots for the layer, connections are made after all layers have been created
1771 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1772 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1773}
1774
Kevin May7d96b162021-02-03 17:38:41 +00001775void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001776{
1777 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1778
1779 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1780 CHECK_VALID_SIZE(inputs.size(), 3);
1781
1782 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1783 CHECK_VALID_SIZE(outputs.size(), 1);
1784
1785 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1786 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1787
1788 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1789 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1790
1791 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1792 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1793
1794 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1795 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1796
1797 size_t step = 2;
1798 std::vector<std::pair<unsigned int, unsigned int>> padList;
1799 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1800 {
1801 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1802 }
1803
1804 armnn::SpaceToBatchNdDescriptor desc;
1805 desc.m_BlockShape = blockShape;
1806 desc.m_PadList = padList;
1807 desc.m_DataLayout = armnn::DataLayout::NHWC;
1808
James Ward58dec6b2020-09-11 17:32:44 +01001809 auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001810
James Conroy05102392020-06-24 15:39:55 +01001811 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001812 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001813 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1814
1815 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1816 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001817 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1818
1819 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1820 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1821
1822 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1823 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1824}
1825
Teresa Charlin3ab85482021-06-08 16:59:29 +01001826armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
Kevin May7d96b162021-02-03 17:38:41 +00001827 const armnn::TensorInfo & inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01001828{
Teresa Charlin3ab85482021-06-08 16:59:29 +01001829 CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
telsoa01c577f2c2018-08-31 09:22:23 +01001830 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1831
1832 if (inputTensorInfo.GetNumDimensions() > 4)
1833 {
1834 std::stringstream ss;
1835 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1836 << " shape:" << inputTensorInfo.GetShape() << " "
1837 << CHECK_LOCATION().AsString();
1838 throw ParseException(ss.str());
1839 }
1840
1841 if (squeezeDims.empty())
1842 {
1843 squeezeDims.assign(dimensionSequence,
1844 dimensionSequence+inputTensorInfo.GetNumDimensions());
1845 }
1846
1847 std::vector<uint32_t> outputDims;
1848 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1849 {
1850 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1851 auto currentDimension = inputTensorInfo.GetShape()[i];
1852 if (skipSqueeze || currentDimension != 1)
1853 {
1854 outputDims.push_back(currentDimension);
1855 }
1856 }
1857
1858 if (outputDims.size() > 4)
1859 {
1860 std::stringstream ss;
1861 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1862 << " shape:" << inputTensorInfo.GetShape() << " "
1863 << CHECK_LOCATION().AsString();
1864 throw ParseException(ss.str());
1865 }
1866
1867 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1868 outputDims.data());
1869
1870 // we need to preserve the tensor type and the quantization data as well
1871 TensorInfo outTensorInfo = inputTensorInfo;
1872 outTensorInfo.SetShape(outShape);
1873
1874 return outTensorInfo;
1875}
1876
Keith Davis0176fd82021-06-01 17:36:32 +01001877void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
1878{
1879 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1880
1881 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1882 CHECK_VALID_SIZE(inputs.size(), 1);
1883 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1884 CHECK_VALID_SIZE(outputs.size(), 1);
1885
1886 auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
1887
1888 IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
1889 ARMNN_ASSERT(layer != nullptr);
1890
1891
1892 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1893 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1894
1895 // Check if output tensor type is Signed32 or Signed64
1896 if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
1897 outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
1898 {
1899 throw ParseException(
1900 fmt::format(
1901 "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
1902 CHECK_LOCATION().AsString()));
1903 }
1904
1905 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1906 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1907
1908 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1909 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1910}
1911
Kevin May7d96b162021-02-03 17:38:41 +00001912void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001913{
1914 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1915
1916 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1917 CHECK_VALID_SIZE(inputs.size(), 1);
1918
1919 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1920 CHECK_VALID_SIZE(outputs.size(), 1);
1921
1922 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1923 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01001924 auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001925
1926 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001927
1928 std::vector<uint32_t> squeezeDim;
1929 // A single negative dim index is interpreted as a negative index in python
1930 // Meaning the index will be the shape size plus the negative index value
1931 if (options->squeeze_dims.size() == 1 && options->squeeze_dims[0] < 0)
1932 {
1933 int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
1934 squeezeDim.push_back(static_cast<uint32_t>(dim));
1935 }
1936 else
1937 {
1938 squeezeDim = AsUnsignedVector(options->squeeze_dims);
1939 }
1940
1941 armnn::TensorInfo outputTensorInfo = TfLiteParserImpl::OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);
1942
James Conroy05102392020-06-24 15:39:55 +01001943 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
telsoa01c577f2c2018-08-31 09:22:23 +01001944
1945 ReshapeDescriptor reshapeDesc;
1946 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1947
telsoa01c577f2c2018-08-31 09:22:23 +01001948 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001949 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001950 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1951
1952 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1953 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1954
1955 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1956 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1957}
1958
Kevin May7d96b162021-02-03 17:38:41 +00001959void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001960{
1961 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1962
1963 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1964 CHECK_VALID_SIZE(inputs.size(), 4);
1965
1966 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1967 CHECK_VALID_SIZE(outputs.size(), 1);
1968
1969 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1970 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1971
1972 StridedSliceDescriptor desc;
1973 desc.m_BeginMask = options->begin_mask;
1974 desc.m_EllipsisMask = options->ellipsis_mask;
1975 desc.m_EndMask = options->end_mask;
1976 desc.m_NewAxisMask = options->new_axis_mask;
1977 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1978 desc.m_DataLayout = armnn::DataLayout::NHWC;
1979
1980 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1981 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1982
1983 std::vector<int> begin(beginTensorInfo.GetNumElements());
1984 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1985
1986 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1987 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1988
1989 std::vector<int> end(endTensorInfo.GetNumElements());
1990 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1991
1992 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1993 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1994
1995 std::vector<int> stride(strideTensorInfo.GetNumElements());
1996 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1997
1998 desc.m_Begin = begin;
1999 desc.m_End = end;
2000 desc.m_Stride = stride;
2001
James Ward58dec6b2020-09-11 17:32:44 +01002002 auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002003 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002004 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002005
Sadik Armagand109a4d2020-07-28 10:42:13 +01002006 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002007 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2008
2009 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2010 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2011
2012 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2013 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2014}
2015
Kevin May7d96b162021-02-03 17:38:41 +00002016void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002017{
2018 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2019
2020 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2021 const auto * options = operatorPtr->builtin_options.AsSubOptions();
2022
2023 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2024 CHECK_VALID_SIZE(inputs.size(), 2);
2025
2026 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2027 CHECK_VALID_SIZE(outputs.size(), 1);
2028
2029 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2030 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
2031
James Ward58dec6b2020-09-11 17:32:44 +01002032 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002033 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002034 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002035
Sadik Armagand109a4d2020-07-28 10:42:13 +01002036 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002037 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2038
2039 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002040 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002041
2042 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2043
2044 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2045 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2046}
2047
Kevin May7d96b162021-02-03 17:38:41 +00002048void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302049{
2050 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2051
2052 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2053 const auto * options = operatorPtr->builtin_options.AsDivOptions();
2054
2055 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2056 CHECK_VALID_SIZE(inputs.size(), 2);
2057
2058 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2059 CHECK_VALID_SIZE(outputs.size(), 1);
2060
2061 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2062 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
2063
James Ward58dec6b2020-09-11 17:32:44 +01002064 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302065 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002066 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302067
Sadik Armagand109a4d2020-07-28 10:42:13 +01002068 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302069 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2070
2071 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002072 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302073 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2074
2075 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2076 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2077}
2078
Kevin May7d96b162021-02-03 17:38:41 +00002079void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002080{
2081 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2082
2083 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2084 const auto * options = operatorPtr->builtin_options.AsAddOptions();
2085
2086 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2087 CHECK_VALID_SIZE(inputs.size(), 2);
2088
2089 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2090 CHECK_VALID_SIZE(outputs.size(), 1);
2091
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002092 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2093 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
2094
James Ward58dec6b2020-09-11 17:32:44 +01002095 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002096 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002097 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002098
Sadik Armagand109a4d2020-07-28 10:42:13 +01002099 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002100 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2101
2102 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002103 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002104 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2105
2106 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2107 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2108}
2109
Kevin May7d96b162021-02-03 17:38:41 +00002110void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002111{
2112 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2113
2114 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2115 const auto * options = operatorPtr->builtin_options.AsMulOptions();
2116
2117 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2118 CHECK_VALID_SIZE(inputs.size(), 2);
2119
2120 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2121 CHECK_VALID_SIZE(outputs.size(), 1);
2122
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002123 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2124 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
2125
James Ward58dec6b2020-09-11 17:32:44 +01002126 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002127 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002128 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002129
Sadik Armagand109a4d2020-07-28 10:42:13 +01002130 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002131 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2132
2133 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002134 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002135 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2136
2137 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2138 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2139}
2140
Kevin May7d96b162021-02-03 17:38:41 +00002141void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002142{
2143 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2144
2145 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2146
2147 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2148 CHECK_VALID_SIZE(outputs.size(), 1);
2149
2150 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
2151 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2152
2153 armnn::MeanDescriptor desc;
2154 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
2155 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
2156 desc.m_Axis = axis;
2157
2158 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002159 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002160
2161 desc.m_KeepDims =
2162 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
2163 true : false;
2164
James Ward58dec6b2020-09-11 17:32:44 +01002165 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002166 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002167 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002168
2169 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2170
2171 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2172 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2173
2174 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2175 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2176}
2177
/// Parses a TfLite PAD operator into an ArmNN Pad layer.
/// Input 0 is the tensor to pad; input 1 is a constant tensor of (before, after)
/// pairs, one pair per dimension.
void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // Copy the padding amounts out of the constant tensor's raw buffer.
    armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
    ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());

    // The pad tensor is laid out as consecutive (before, after) pairs.
    size_t step = 2;
    armnn::PadDescriptor desc;
    if (inputTensorInfo.IsQuantized())
    {
        // For quantized tensors pad with the quantization offset — presumably
        // because the offset is the quantized representation of real 0.
        desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
    }
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    auto layerName = fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Connections are resolved after all layers have been created.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2219
/// Parses a TfLite PRELU operator into an ArmNN Prelu layer.
/// Input 0 is the data tensor, input 1 the alpha (slope) tensor. When alpha is
/// a constant tensor, a Constant layer is created and wired directly into the
/// Prelu layer's second input slot instead of going through the normal
/// operator-input registration.
void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo alphaTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    if (IsConstTensor(inputs[1]))
    {
        // Only the data input (slot 0) is registered as a consumer of an
        // operator input tensor; the alpha input is fed by a Constant layer.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
        RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);

        auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo);
        std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
        IConnectableLayer* constLayer =
                    m_Network->AddConstantLayer(alphaTensorAndData, constLayerName.c_str());
        ARMNN_ASSERT(constLayer != nullptr);

        constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
        constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
        // VIRTUAL_OPERATOR_ID marks that this output comes from a synthesized
        // layer, not a real operator in the TfLite model.
        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            constLayer,
                            { inputTensorIndexes[1] });
    }
    else
    {
        // Non-constant alpha: both inputs are wired up like any other operator.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2269
Kevin May7d96b162021-02-03 17:38:41 +00002270void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00002271{
2272 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2273
2274 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2275 CHECK_VALID_SIZE(inputs.size(), 1);
2276
2277 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2278 CHECK_VALID_SIZE(outputs.size(), 1);
2279
James Ward58dec6b2020-09-11 17:32:44 +01002280 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002281
2282 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002283 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002284
Sadik Armagand109a4d2020-07-28 10:42:13 +01002285 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002286 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2287
2288 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2289 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2290
2291 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2292 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2293}
Finn Williamsc42c3842019-01-22 14:18:11 +00002294
// RELU: delegate to the common activation path.
void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
}
2299
// RELU6: delegate to the common activation path (BoundedReLu with A=6, B=0,
// set inside ParseActivation).
void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
}
Sadik Armagan58f39192018-09-17 14:14:39 +01002304
// LEAKY_RELU: delegate to the common activation path (alpha is read from the
// operator options inside ParseActivation).
void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
}
2309
// LOGISTIC: delegate to the common activation path as Sigmoid.
void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
}
2314
// TANH: delegate to the common activation path.
void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
}
2319
// ELU: delegate to the common activation path.
void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
}
2324
// HARD_SWISH: delegate to the common activation path.
void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
}
Finn Williamsc42c3842019-01-22 14:18:11 +00002329
/// Common handler for all TfLite activation operators (RELU, RELU6, LOGISTIC,
/// TANH, LEAKY_RELU, ELU, HARD_SWISH). Builds an ActivationDescriptor for the
/// requested function and adds a single Activation layer.
/// @throws ParseException for an activation type with no handling below.
void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    // operatorPtr is only read in the LeakyReLu case; silence the warning otherwise.
    IgnoreUnused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // The per-function suffix is appended in the switch below.
    auto layerName = fmt::format("Activation:");
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
            // RELU6 clamps to [0, 6]: m_A is the upper bound, m_B the lower.
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::TanH:
        {
            layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        case ActivationFunction::LeakyReLu:
        {
            layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
            // The negative-slope alpha comes from the operator's builtin options.
            const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
            activationDesc.m_A = options->alpha;
            break;
        }
        case ActivationFunction::Elu:
        {
            layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
            activationDesc.m_A = 1.0f;
            break;
        }
        case ActivationFunction::HardSwish:
        {
            layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
                            static_cast<int>(activationType), CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
/// Computes the output TensorInfo of a TfLite RESHAPE operator.
/// @param inputTensorInfo  Info of the tensor being reshaped.
/// @param targetDimsIn     Requested shape; at most one entry may be -1, which
///                         is inferred ("stretched") from the remaining dims.
/// @return A TensorInfo with the target shape; data type and quantization
///         parameters are copied from the input.
/// @throws ParseException if more than one target dimension is -1.
armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
                                                         const std::vector<int32_t> & targetDimsIn)
{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        // Reject a second -1: the stretched dimension must be unique.
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
        }

        // Initialising the accumulate with -1 cancels the -1 entry in the
        // product, leaving the (positive) product of the fixed dimensions.
        auto targetNumElements =
            armnn::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        // NOTE(review): integer division truncates here — assumes the input
        // element count is divisible by the fixed dims' product; no explicit
        // check. Verify upstream validation covers the non-divisible case.
        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    // Preserve type and quantization; only the shape changes.
    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
2441
/// Parses a TfLite RESHAPE operator and adds the corresponding ReshapeLayer to the network.
/// The target shape may come from the operator's built-in options or from a constant
/// second input tensor; exactly one of the two must be present.
void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
    auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    // Reshape must not change quantization parameters between input and output.
    CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");

    // Extracting new shape for the output
    // There are two ways it can be passed
    //  * First is to define the target shape in the operator built-in options
    //  * Second is to pass it as a second input tensor
    std::vector<int32_t> targetShape;
    bool targetShapeFound = false;
    // Check if built-in options were given
    if (options != nullptr)
    {
        // make sure the parameter is given
        if (options->new_shape.empty() == false)
        {
            targetShape = options->new_shape;
            targetShapeFound = true;
        }
    }

    // If there is no built-in option given or if the built-in new_shape parameter was empty
    if (!targetShapeFound)
    {
        // Check for a second input tensor
        if (inputs.size() > 1 && inputs[1] != nullptr)
        {
            // Only constant shape tensors are supported; a variable tensor's value is
            // unknown at parse time.
            if (inputs[1]->is_variable)
            {
                ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
            }

            if (inputs[1]->shape.size() != 1)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
            }

            if (inputs[1]->type != tflite::TensorType_INT32)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
            }

            // Extract target shape from input
            auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
            auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
            // Guard against a shape tensor whose backing buffer holds no data.
            if (!values)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Reshape operator target shape input buffer data is null");
            }
            // shape[0] is the element count of the 1D shape tensor (checked above).
            for (int i=0; i < inputs[1]->shape[0]; ++i)
            {
                targetShape.push_back(values[i]);
            }
        }
        else
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }
    }

    // Resolve any -1 wildcard dimension against the input's element count.
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    // Only the data input (index 0) becomes a real connection; the shape tensor is consumed here.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2546
/// Parses a TfLite RESIZE_BILINEAR operator by delegating to the shared resize handler.
void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
}
2551
/// Parses a TfLite RESIZE_NEAREST_NEIGHBOR operator by delegating to the shared resize handler.
void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
}
2556
Kevin May7d96b162021-02-03 17:38:41 +00002557void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
Sadik Armagana3b31f02019-12-05 09:08:53 +00002558{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002559 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2560
2561 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2562 CHECK_VALID_SIZE(inputs.size(), 2);
2563
2564 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2565 CHECK_VALID_SIZE(outputs.size(), 1);
2566
2567 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
2568
2569 // Data for the parsed tensor args (size) must be stored locally.
2570 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2571
2572 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2573 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2574
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002575 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002576 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002577 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002578 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2579 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002580
James Ward58dec6b2020-09-11 17:32:44 +01002581 auto layerName = fmt::format("Resize:");
Sadik Armagana3b31f02019-12-05 09:08:53 +00002582
2583 switch (resizeMethod)
2584 {
2585 case ResizeMethod::Bilinear:
2586 {
James Ward58dec6b2020-09-11 17:32:44 +01002587 layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002588
2589 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2590 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2591
David Monahan4a0c9b92020-05-30 09:48:39 +01002592 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002593 break;
2594 }
2595 case ResizeMethod::NearestNeighbor:
2596 {
James Ward58dec6b2020-09-11 17:32:44 +01002597 layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagana3b31f02019-12-05 09:08:53 +00002598 break;
2599 }
2600 default:
2601 {
2602 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002603 fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
2604 static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
Sadik Armagana3b31f02019-12-05 09:08:53 +00002605 }
2606 }
2607
James Conroy05102392020-06-24 15:39:55 +01002608 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002609 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002610 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2611
2612 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
2613 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002614 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2615
2616 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2617 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2618
2619 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2620 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2621}
2622
/// Parses a TfLite CONCATENATION operator. Builds an OriginsDescriptor describing where
/// each input view starts along the concatenation axis, adds a ConcatLayer, and appends
/// any fused activation specified in the builtin options.
void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
    // Rank is taken from the first input; all inputs are assumed to share it.
    uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();

    // Normalise a possibly negative axis into [0, inputRank).
    const unsigned int concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

    // Running offset along the concat axis; advanced by each input's extent.
    unsigned int mergeDimOrigin = 0;

    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);

        // This set up concatDescriptor view origin
        armnnUtils::ProcessConcatInputTensorInfo(
            inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);

    IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});

    // add fused activation layer
    // Note: 'layer' now refers to the activation layer (or the concat layer if no
    // activation), so the output slots below are registered on the right layer.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2672
Kevin May7d96b162021-02-03 17:38:41 +00002673void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002674{
2675 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2676
2677 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2678 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2679
2680 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2681
2682 FullyConnectedDescriptor desc;
2683 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002684 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002685
2686 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2687 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2688 CHECK_VALID_SIZE(outputs.size(), 1);
2689
2690 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2691
2692 // Fully Connected Layer accepts two dimensional weights input
2693 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2694 if (weightsDimension != 2)
2695 {
2696 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002697 fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
2698 "Node {}",
2699 weightsDimension,
2700 CHECK_LOCATION().AsString()));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002701 }
2702
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002703 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01002704 auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002705
Matthew Sloyan81beae32021-07-13 19:46:11 +01002706 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2707 // Add the first input tensor to the registration list
2708 std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
2709 std::vector<unsigned int> ignoreInputWhenRegister = {};
Finn Williamsd4fa5452021-03-01 12:31:41 +00002710
2711 desc.m_ConstantWeights = IsConstTensor(inputs[1]);
2712
Matthew Sloyan81beae32021-07-13 19:46:11 +01002713 // Add the weights input to the registration list, constant layers will be added by SetupConstantLayers if constant.
2714 tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002715
Finn Williamsd4fa5452021-03-01 12:31:41 +00002716 if (inputs.size() == 3)
2717 {
2718 desc.m_BiasEnabled = true;
Matthew Sloyan81beae32021-07-13 19:46:11 +01002719
2720 // Add the biases input to the registration list, constant layer will be added by SetupConstantLayers.
2721 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
Finn Williamsd4fa5452021-03-01 12:31:41 +00002722 }
2723
Matthew Sloyan81beae32021-07-13 19:46:11 +01002724 // Filters and biases are always passed to fully connected as inputs
2725 layer = m_Network->AddFullyConnectedLayer(desc, layerName.c_str());
Finn Williamsd4fa5452021-03-01 12:31:41 +00002726
2727 ARMNN_ASSERT(layer != nullptr);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002728 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2729
Finn Williamsd4fa5452021-03-01 12:31:41 +00002730 unsigned int startingSlotIndex = 0;
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002731 if (inputTensorInfo.GetNumDimensions() > 2)
2732 {
2733 // Add reshape to flatten to 2D [batch_size, input_size],
2734 // where "input_size" corresponds to the number of inputs to the layer,
2735 // matching the second dimension of weights,
2736 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2737 std::vector<unsigned int> reshapedDimensions(2);
2738 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2739 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2740
2741 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2742 {
2743 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002744 fmt::format("Failed to deduce input tensor shape from filter size {} {}",
2745 reshapedDimensions[1],
2746 CHECK_LOCATION().AsString()));
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002747 }
2748
2749 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2750 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2751
James Ward58dec6b2020-09-11 17:32:44 +01002752 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Finn Williamsd4fa5452021-03-01 12:31:41 +00002753 armnn::ReshapeDescriptor reshapeDescriptor;
2754 reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
2755 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, layerName.c_str());
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002756
2757 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2758 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2759
2760 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
Finn Williamsd4fa5452021-03-01 12:31:41 +00002761 // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
2762 tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
2763 startingSlotIndex = 1;
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002764 }
Finn Williamsd4fa5452021-03-01 12:31:41 +00002765
2766 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002767
Sadik Armagand109a4d2020-07-28 10:42:13 +01002768 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002769 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2770
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002771 // we need to add the activation layer and fortunately we don't need to care about the data layout
2772 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2773 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002774
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002775 // register the output connection slots for the layer, connections are made after all layers have been created
2776 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2777 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2778}
2779
/// Parses the TfLite custom operator TFLite_Detection_PostProcess. The operator's
/// parameters arrive as a flexbuffer map in custom_options rather than builtin options.
/// Produces four outputs: detection boxes, classes, scores, and the number of detections.
void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections           = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection  = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold       = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold         = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses              = m["num_classes"].AsUInt32();
    desc.m_ScaleH                  = m["h_scale"].AsFloat();
    desc.m_ScaleW                  = m["w_scale"].AsFloat();
    desc.m_ScaleX                  = m["x_scale"].AsFloat();
    desc.m_ScaleY                  = m["y_scale"].AsFloat();

    // These two keys are optional in the flexbuffer map; the descriptor keeps its
    // default value when a key is absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms       = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass  = m["detections_per_class"].AsUInt32();
    }

    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Input 2 holds the constant anchor boxes.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);

    auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
                                                                       layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });  // detection boxes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });     // detection classes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });     // detection scores
    m_OverridenOutputShapes.push_back({ 1 });                     // number of detections

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
2856
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002857/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00002858void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002859{
2860 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2861
2862 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2863 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2864 CHECK_VALID_SIZE(outputs.size(), 1);
2865
2866 if (inputs.size() < 1)
2867 {
2868 throw ParseException("Pack must have at least one input.");
2869 }
2870
2871 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2872 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2873
2874 StackDescriptor desc;
2875 desc.m_Axis = static_cast<uint32_t>(options->axis);
2876 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2877
2878 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2879 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2880 desc.m_InputShape = inputTensorInfo.GetShape();
2881
James Ward58dec6b2020-09-11 17:32:44 +01002882 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002883 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2884
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002885 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002886
Sadik Armagand109a4d2020-07-28 10:42:13 +01002887 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002888 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2889
2890 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2891 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2892
2893 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2894 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2895}
2896
Kevin May7d96b162021-02-03 17:38:41 +00002897void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01002898{
2899 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2900
2901 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2902 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2903
2904 // This unpackAxis indicates the axis to unpack
2905 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2906
2907 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2908 CHECK_VALID_SIZE(inputs.size(), 1);
2909
2910 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002911
2912 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2913 {
2914 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002915 fmt::format("The unpack axis: {} cannot be greater than or equal to "
2916 "the number of input dimension {} {}",
2917 unpackAxis,
2918 inputTensorInfo.GetNumDimensions(),
2919 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002920 }
2921
Nina Drozd200e3802019-04-15 09:47:39 +01002922 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2923 // If num is not defined, automatically infer from the length of the dimension axis.
2924 if(unpackNum == 0)
2925 {
2926 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2927 }
2928
2929 // If unpack number cannot be inferred and is still zero, throw ParseException.
2930 if(unpackNum == 0)
2931 {
2932 throw ParseException("Number to unpack must greater than zero.");
2933 }
2934
2935 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2936 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2937
2938 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2939 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2940
2941 // Add current input shape to unpackDimSizes
2942 for (unsigned int i = 0; i < inputDimSize; ++i)
2943 {
2944 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2945 }
2946
2947 if (unpackDimSizes[unpackAxis] != unpackNum)
2948 {
2949 throw ParseException("Number to unpack must be the same as length of the dimension to "
2950 "unpack along.");
2951 }
2952
2953 unpackDimSizes[unpackAxis] /= unpackNum;
2954
2955 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2956 for (unsigned int j = 0; j < unpackNum; ++j)
2957 {
2958 // Set the size of the views.
2959 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2960 {
2961 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2962 }
2963 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2964 }
2965
James Ward58dec6b2020-09-11 17:32:44 +01002966 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01002967 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002968 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01002969
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002970 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2971 unpackDimSizes.data());
2972
Nina Drozd200e3802019-04-15 09:47:39 +01002973 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2974 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2975
Finn Williamsb49ed182021-06-29 15:50:08 +01002976 std::vector<unsigned int> reshapeDims;
2977 for (unsigned int axis = 0; axis < splitOutShape.GetNumDimensions(); ++axis)
2978 {
2979 if (axis != unpackAxis)
2980 {
2981 reshapeDims.push_back(splitOutShape[axis]);
2982 }
2983 }
2984
2985 TensorShape reshapeOutputShape(splitOutShape.GetNumDimensions() -1, reshapeDims.data());
2986
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002987 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2988 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2989 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002990 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01002991 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002992 armnn::ReshapeDescriptor desc;
Finn Williamsb49ed182021-06-29 15:50:08 +01002993 desc.m_TargetShape = reshapeOutputShape;
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002994 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2995
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002996 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2997 outputTensorInfo.GetDataType(),
2998 outputTensorInfo.GetQuantizationScale(),
2999 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003000 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
3001
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01003002 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003003
3004 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
3005 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
3006 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
3007 }
Nina Drozd200e3802019-04-15 09:47:39 +01003008}
3009
Kevin May7d96b162021-02-03 17:38:41 +00003010void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd0324f482019-04-08 10:52:10 +01003011{
3012 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3013
3014 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3015 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
3016
3017 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
3018
Nina Drozd200e3802019-04-15 09:47:39 +01003019 // If number of splits cannot be inferred and is zero, throw ParseException.
3020 if(numSplits == 0)
3021 {
3022 throw ParseException("Number to splits must greater than zero.");
3023 }
3024
Nina Drozd0324f482019-04-08 10:52:10 +01003025 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3026 CHECK_VALID_SIZE(inputs.size(), 2);
3027 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3028 CHECK_VALID_SIZE(outputs.size(), numSplits);
3029
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003030 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
3031 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
3032 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
Nina Drozd0324f482019-04-08 10:52:10 +01003033
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003034 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003035 if (axisBufferPtr == nullptr)
3036 {
3037 throw ParseException(
3038 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
3039 CHECK_LOCATION().AsString()));
3040 }
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003041
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003042 std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
3043 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
3044 int32_t axis = axisData[0];
3045
3046 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
3047 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
3048 {
3049 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
3050 // E.g. Rank 4 tensor can have axis in range [-4, 3)
3051 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
3052 throw ParseException(
3053 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
3054 axis,
3055 CHECK_LOCATION().AsString()));
3056 }
3057
3058 const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
Nina Drozd0324f482019-04-08 10:52:10 +01003059
Nina Drozd0324f482019-04-08 10:52:10 +01003060 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003061 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01003062 {
3063 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003064 fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
3065 inputTensorInfo.GetNumDimensions(),
3066 MaxNumOfTensorDimensions,
3067 CHECK_LOCATION().AsString()));
Nina Drozd0324f482019-04-08 10:52:10 +01003068 }
3069
3070 std::vector<unsigned int> splitterDimSizes(inputDimSize);
3071
3072 // Add current input shape to splitterDimSizes
3073 for (unsigned int i = 0; i < inputDimSize; ++i)
3074 {
3075 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
3076 }
3077
3078 if (splitterDimSizes[splitDim] % numSplits != 0)
3079 {
3080 throw ParseException("Number of splits must evenly divide the dimension");
3081 }
3082 splitterDimSizes[splitDim] /= numSplits;
3083
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003084 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01003085 for (unsigned int j = 0; j < numSplits; ++j)
3086 {
3087 // Set the size of the views.
3088 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
3089 {
3090 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
3091 }
3092 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
3093 }
3094
James Ward58dec6b2020-09-11 17:32:44 +01003095 auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd0324f482019-04-08 10:52:10 +01003096 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003097 ARMNN_ASSERT(layer != nullptr);
Nina Drozd0324f482019-04-08 10:52:10 +01003098
3099 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003100 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01003101
Nina Drozd0324f482019-04-08 10:52:10 +01003102 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3103 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003104 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01003105 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01003106 }
3107
3108 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3109 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3110}
3111
Derek Lambertif0176992020-04-28 13:37:49 +01003112unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
3113{
3114 int numDims = armnn::numeric_cast<int>(numDimsIn);
3115 int v = idx < 0 ? numDims + idx : idx;
3116 ARMNN_ASSERT(v >= 0);
3117 ARMNN_ASSERT(v < numDims);
3118
3119 return static_cast<unsigned int>(v);
3120}
3121
Kevin May7d96b162021-02-03 17:38:41 +00003122void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01003123{
3124 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3125
3126 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Ryan OShea86704732020-05-26 11:41:04 +01003127 const auto * options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01003128
3129 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3130 CHECK_VALID_SIZE(inputs.size(), 3);
3131
3132 auto& inputTensor = inputs[0];
3133 auto& splitsTensor = inputs[1];
3134 auto& axisTensor = inputs[2];
3135
3136 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
3137 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
3138 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
3139 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
3140
3141 // Inputs
3142 auto inputDimSize = inputTensorInfo.GetNumDimensions();
3143 if (inputDimSize > MaxNumOfTensorDimensions)
3144 {
3145 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003146 fmt::format("The number of dimensions: {} for input tensors of the "
3147 "SplitV op cannot be greater than {} {}",
3148 inputTensorInfo.GetNumDimensions(),
3149 MaxNumOfTensorDimensions,
3150 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01003151 }
3152
3153 // Get split axis
3154 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003155 if (axisBufferPtr == nullptr)
3156 {
3157 throw ParseException(
3158 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
3159 CHECK_LOCATION().AsString()));
3160 }
3161
Derek Lambertif0176992020-04-28 13:37:49 +01003162 std::vector<int> axisData(axisTensorInfo.GetNumElements());
3163 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003164 int32_t axis = axisData[0];
3165
3166 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
3167 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
3168 {
3169 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
3170 // E.g. Rank 4 tensor can have axis in range [-4, 3)
3171 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
3172 throw ParseException(
3173 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
3174 axis,
3175 CHECK_LOCATION().AsString()));
3176 }
3177 const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
Derek Lambertif0176992020-04-28 13:37:49 +01003178
Derek Lambertif0176992020-04-28 13:37:49 +01003179 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01003180 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01003181 unsigned int numSplits{0};
3182
3183 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01003184 {
3185 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01003186 }
3187 else
3188 {
Ryan OShea86704732020-05-26 11:41:04 +01003189 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01003190 }
3191
3192 if (numSplits <=0)
3193 {
3194 throw ParseException("SplitV has invalid number of splits");
3195 }
3196
Jan Eilersc0761e92020-06-29 16:48:44 +01003197 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01003198 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01003199 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01003200
Jan Eilersc0761e92020-06-29 16:48:44 +01003201 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01003202 int numInferred{0};
3203 unsigned int inferIdx{0};
3204 int splitSum{0};
3205 for (auto split : splitsData)
3206 {
3207 if (split < 0)
3208 {
3209 numInferred++;
3210 inferIdx = idx;
3211 }
3212 else
3213 {
3214 splitSum += split;
3215 }
3216 idx++;
3217 }
3218 // Check for inferred Axis
3219 if (numInferred == 0)
3220 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003221 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01003222 {
3223 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
3224 }
3225 }
3226 else if (numInferred == 1)
3227 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003228 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01003229 }
3230 else
3231 {
3232 throw ParseException("Cannot infer split size for more than one split");
3233 }
3234
Derek Lambertif0176992020-04-28 13:37:49 +01003235 //Ouput size validation
3236 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3237 CHECK_VALID_SIZE(outputs.size(), numSplits);
3238
3239 // Setup Armnn descriptor
3240 SplitterDescriptor splitDesc(numSplits, inputDimSize);
3241 unsigned int accumSplit = 0;
3242 for (unsigned int j = 0; j < numSplits; ++j)
3243 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003244 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01003245
3246 // Set the size of the views.
3247 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
3248 {
3249 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
3250 if (dimIdx == splitDim)
3251 {
3252 dimSize = splitSize;
3253 }
3254 splitDesc.SetViewSize(j, dimIdx, dimSize);
3255 }
3256
3257 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
3258 accumSplit += splitSize;
3259 }
3260
James Ward58dec6b2020-09-11 17:32:44 +01003261 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01003262 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003263 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01003264
3265 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3266 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3267
3268 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3269 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003270 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01003271 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
3272 }
3273
3274 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3275 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3276}
3277
Matthew Sloyan28f177c2021-04-09 14:38:52 +01003278void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
3279{
3280 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
3281}
3282
Kevin May7d96b162021-02-03 17:38:41 +00003283void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
Inki Daed4619e22020-09-10 15:33:54 +09003284{
Matthew Sloyan28f177c2021-04-09 14:38:52 +01003285 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
3286}
3287
/// Shared handler for ARG_MIN / ARG_MAX: adds an ArgMinMax layer that yields
/// the index of the min/max element along a constant axis (inputs[1]).
/// @param subgraphIndex     Index of the subgraph containing the operator.
/// @param operatorIndex     Index of the operator within that subgraph.
/// @param argMinMaxFunction Selects Min or Max behaviour.
/// @throws ParseException if the output type is not Signed32/Signed64, the
///         axis buffer is unreadable, or the axis is out of range.
void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    // The axis input must be a scalar (exactly one element).
    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);

    // Check if output tensor type is Signed32 or Signed64
    if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
        outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
    {
        throw ParseException(
            fmt::format(
                "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
                CHECK_LOCATION().AsString()));
    }

    // Get const axis value from model and set it to descriptor.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData.front();

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    ArgMinMaxDescriptor desc;
    desc.m_Axis = axis;
    desc.m_Function = argMinMaxFunction;

    // Register a ArgMin/ArgMax layer.
    // The name template is chosen by function so layers read "ArgMax:<subgraph>:<op>" etc.
    auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
    auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
    IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
3356
/// Parses a TfLite GATHER operator: gathers slices of inputs[0] at the
/// positions given by the indices tensor inputs[1] along the axis in the
/// operator options, mapping it onto an ArmNN Gather layer.
/// @throws ParseException if the axis is out of range or the output rank does
///         not equal inputRank + indicesRank - 1.
void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo indicesTensorInfo = ToTensorInfo(inputs[1]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);

    armnn::GatherDescriptor gatherDescriptor;

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsGatherOptions();
    auto axis = options->axis;

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
    auto outputDimensions = outputTensorInfo.GetNumDimensions();
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        throw ParseException(
            fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
                        axis,
                        inputDimensions, inputDimensions,
                        CHECK_LOCATION().AsString()));
    }
    // Gather replaces the indexed axis with the indices shape, hence this rank identity.
    if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
    {
        throw ParseException(
            fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
                        outputDimensions,
                        inputDimensions, indicesDimensions,
                        CHECK_LOCATION().AsString()));
    }

    gatherDescriptor.m_Axis = axis;

    auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Both the params tensor (slot 0) and the indices tensor (slot 1) feed the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3409
Kevin May7d96b162021-02-03 17:38:41 +00003410void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00003411{
3412 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3413
Kevin May7d96b162021-02-03 17:38:41 +00003414 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003415 CHECK_VALID_SIZE(inputs.size(), 1);
Kevin May7d96b162021-02-03 17:38:41 +00003416 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003417 CHECK_VALID_SIZE(outputs.size(), 1);
3418
3419 armnn::DepthToSpaceDescriptor descriptor;
3420
3421 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3422 const auto * options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
3423 auto blockSize = options->block_size;
3424 if (blockSize < 2)
3425 {
3426 throw ParseException(
3427 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
3428 blockSize,
3429 CHECK_LOCATION().AsString()));
3430 }
3431 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
3432
3433 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
3434 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
3435 ARMNN_ASSERT(layer != nullptr);
3436 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3437 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3438
3439 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3440 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3441
3442 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3443 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3444}
3445
Kevin May7d96b162021-02-03 17:38:41 +00003446void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003447{
Sadik Armagana2747482021-02-09 10:28:54 +00003448 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
3449}
3450
Teresa Charlin4e3e8312021-08-05 12:34:37 +01003451void TfLiteParserImpl::ParseReduceProd(size_t subgraphIndex, size_t operatorIndex)
3452{
3453 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Prod);
3454}
3455
Sadik Armagana2747482021-02-09 10:28:54 +00003456void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
3457{
3458 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
3459}
3460
3461void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
3462{
3463 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
3464}
3465
3466void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
3467{
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003468 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3469
3470 const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3471 const auto *options = operatorPtr->builtin_options.AsReducerOptions();
3472
3473 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3474 CHECK_VALID_SIZE(inputs.size(), 2);
3475
3476 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3477 CHECK_VALID_SIZE(outputs.size(), 1);
3478
Sadik Armagana2747482021-02-09 10:28:54 +00003479 auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003480
3481 armnn::TensorInfo inputTensorInfo0 = ToTensorInfo(inputs[0]);
3482 armnn::TensorInfo inputTensorInfo1 = ToTensorInfo(inputs[1]);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003483
3484 ReduceDescriptor desc;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003485 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3486 // Get const axis value from model and set it to descriptor.
3487 if (axisBufferPtr != nullptr)
3488 {
Sadik Armagan49bdb792021-02-11 13:57:07 +00003489 std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
3490 ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());
3491
3492 // Convert the axis to unsigned int and remove duplicates.
3493 auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
3494 std::set<unsigned int> uniqueAxis;
3495 std::transform(axisData.begin(),
3496 axisData.end(),
3497 std::inserter(uniqueAxis, uniqueAxis.begin()),
3498 [rank](int i)->unsigned int{
3499 return static_cast<uint32_t>(((i + rank) % rank)); });
3500 desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003501 }
Sadik Armagana2747482021-02-09 10:28:54 +00003502 else
3503 {
3504 for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
3505 {
3506 desc.m_vAxis.push_back(i);
3507 }
3508 }
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003509
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003510 desc.m_KeepDims = options->keep_dims;
Sadik Armagana2747482021-02-09 10:28:54 +00003511 desc.m_ReduceOperation = reduceOperation;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003512
3513 // Register a new layer object, Sum.
3514 IConnectableLayer *layer = m_Network->AddReduceLayer(desc, layerName.c_str());
3515
3516 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3517 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3518
3519 // Register input tensor to the layer.
3520 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3521 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3522
3523 // Register output tensor to the layer.
3524 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3525 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3526}
3527
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003528void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
3529{
3530 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
3531}
3532
3533void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
3534{
3535 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
3536}
3537
Mike Kelly31dce2b2021-09-01 21:22:37 +01003538void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex)
3539{
3540 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3541
3542 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3543 CHECK_VALID_SIZE(inputs.size(), 1);
3544
3545 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3546 CHECK_VALID_SIZE(outputs.size(), 1);
3547
3548 auto layerName = fmt::format("LRN:{}:{}", subgraphIndex, operatorIndex);
3549 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
3550
3551 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3552
3553 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3554 const auto* options = operatorPtr->builtin_options.AsLocalResponseNormalizationOptions();
3555
3556 armnn::NormalizationDescriptor descriptor;
3557 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3558 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3559 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3560 descriptor.m_NormSize = static_cast<uint32_t>(options->radius);
3561 descriptor.m_K = options->bias;
3562 descriptor.m_Alpha = options->alpha;
3563 descriptor.m_Beta = options->beta;
3564
3565 // ArmNN expects normSize to be the full size of the normalization
3566 // window rather than the radius as in TfLite.
3567 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3568
3569 IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
3570 ARMNN_ASSERT(layer != nullptr);
3571
3572 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3573 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3574
3575 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3576 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3577
3578 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3579 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3580}
3581
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003582void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
3583{
3584 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
3585}
3586
3587void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
3588{
3589 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
3590}
3591
3592void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
3593{
3594 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
3595}
3596
3597void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
3598{
3599 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3600
3601 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3602 CHECK_VALID_SIZE(inputs.size(), 1);
3603
3604 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3605 CHECK_VALID_SIZE(outputs.size(), 1);
3606
3607 std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
3608 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
3609
3610 ElementwiseUnaryDescriptor desc;
3611 desc.m_Operation = unaryOperation;
3612 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
3613 ARMNN_ASSERT(layer != nullptr);
3614
3615 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3616 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3617
3618 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3619 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3620
3621 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3622 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3623}
3624
Bruno Goncalves2d0eb862021-07-11 14:10:15 -03003625void TfLiteParserImpl::ParseEqual(size_t subgraphIndex, size_t operatorIndex)
3626{
3627 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Equal);
3628}
3629
3630void TfLiteParserImpl::ParseNotEqual(size_t subgraphIndex, size_t operatorIndex)
3631{
3632 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::NotEqual);
3633}
3634
3635void TfLiteParserImpl::ParseGreater(size_t subgraphIndex, size_t operatorIndex)
3636{
3637 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Greater);
3638}
3639
3640void TfLiteParserImpl::ParseGreaterOrEqual(size_t subgraphIndex, size_t operatorIndex)
3641{
3642 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::GreaterOrEqual);
3643}
3644
3645void TfLiteParserImpl::ParseLess(size_t subgraphIndex, size_t operatorIndex)
3646{
3647 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Less);
3648}
3649
3650void TfLiteParserImpl::ParseLessOrEqual(size_t subgraphIndex, size_t operatorIndex)
3651{
3652 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::LessOrEqual);
3653}
3654
3655void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorIndex,
3656 ComparisonOperation comparisonOperation)
3657{
3658 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3659
3660 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3661 CHECK_VALID_SIZE(inputs.size(), 2);
3662
3663 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3664 CHECK_VALID_SIZE(outputs.size(), 1);
3665
3666 auto layerName = std::string(GetComparisonOperationAsCString(comparisonOperation)) + ":{}:{}";
3667 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
3668
3669 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3670 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
3671 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerNameFormatted, "Input 0", "Input 1");
3672
3673 ComparisonDescriptor desc;
3674 desc.m_Operation = comparisonOperation;
3675 IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());
3676 ARMNN_ASSERT(layer != nullptr);
3677
3678 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3679 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3680
3681 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3682 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
3683
3684 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3685 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3686}
3687
Kevin May7d96b162021-02-03 17:38:41 +00003688armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
3689 unsigned int outputSlot,
3690 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01003691{
3692 ActivationDescriptor activationDesc;
3693 std::string layerName = prevLayer->GetName();
3694
3695 switch(activationType)
3696 {
3697 case tflite::ActivationFunctionType_NONE:
3698 {
3699 // this is a no-op: return previous layer
3700 return prevLayer;
3701 }
3702 case tflite::ActivationFunctionType_RELU:
3703 {
3704 activationDesc.m_Function = ActivationFunction::ReLu;
3705 layerName += ":RELU";
3706 break;
3707 }
3708 case tflite::ActivationFunctionType_RELU6:
3709 {
3710 activationDesc.m_Function = ActivationFunction::BoundedReLu;
3711 activationDesc.m_A = 6.0f;
3712 activationDesc.m_B = 0.0f;
3713 layerName += ":RELU6";
3714 break;
3715 }
3716 case tflite::ActivationFunctionType_TANH:
3717 {
3718 activationDesc.m_Function = ActivationFunction::TanH;
3719 activationDesc.m_A = 1.0f;
3720 activationDesc.m_B = 1.0f;
3721 layerName += ":TANH";
3722 break;
3723 }
3724
3725 // I only put these here as a reminder what others we could support
3726 case tflite::ActivationFunctionType_RELU_N1_TO_1:
3727 case tflite::ActivationFunctionType_SIGN_BIT:
3728 default:
3729 {
3730 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003731 fmt::format("TfLite parser doesn't suppport fused activation: "
3732 "{}/{} {} ",
3733 activationType,
3734 tflite::EnumNameActivationFunctionType(activationType),
3735 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003736
3737 }
3738 }
3739
3740 IConnectableLayer* activationLayer =
3741 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
3742
3743 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
3744 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
3745 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
3746 return activationLayer;
3747}
3748
Kevin May7d96b162021-02-03 17:38:41 +00003749TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char * fileName)
telsoa01c577f2c2018-08-31 09:22:23 +01003750{
3751 if (fileName == nullptr)
3752 {
James Ward58dec6b2020-09-11 17:32:44 +01003753 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003754 CHECK_LOCATION().AsString()));
3755 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01003756 std::error_code errorCode;
3757 fs::path pathToFile(fileName);
3758 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01003759 {
James Ward58dec6b2020-09-11 17:32:44 +01003760 //fmt::format() could not be used here (format error)
3761 std::stringstream msg;
3762 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
3763 << " " << CHECK_LOCATION().AsString();
3764
3765 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01003766 }
3767 std::ifstream file(fileName, std::ios::binary);
3768 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3769 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3770 fileContent.size());
3771}
3772
Kevin May7d96b162021-02-03 17:38:41 +00003773TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
telsoa01c577f2c2018-08-31 09:22:23 +01003774{
3775 if (binaryContent == nullptr)
3776 {
James Ward58dec6b2020-09-11 17:32:44 +01003777 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003778 CHECK_LOCATION().AsString()));
3779 }
3780 flatbuffers::Verifier verifier(binaryContent, len);
3781 if (verifier.VerifyBuffer<tflite::Model>() == false)
3782 {
3783 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003784 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
3785 "flatbuffers format. size:{} {}",
3786 len,
3787 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003788 }
3789 return tflite::UnPackModel(binaryContent);
3790}
3791
Kevin May7d96b162021-02-03 17:38:41 +00003792TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr & model,
3793 size_t subgraphIndex,
3794 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003795{
3796 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3797
Derek Lambertiff05cc52019-04-26 13:05:17 +01003798 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3799 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003800
3801 size_t inputCount = operatorPtr->inputs.size();
mathad01c21025d2021-04-26 10:09:37 +01003802 TensorRawPtrVector result;
telsoa01c577f2c2018-08-31 09:22:23 +01003803 for (size_t i=0; i<inputCount; ++i)
3804 {
mathad01c21025d2021-04-26 10:09:37 +01003805 // If the input location is -1 then assume input is turned off.
3806 if (operatorPtr->inputs[i] == -1)
3807 {
3808 continue;
3809 }
3810 else
3811 {
3812 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
3813 result.push_back(subgraphPtr->tensors[inputId].get());
3814 }
telsoa01c577f2c2018-08-31 09:22:23 +01003815 }
3816 return result;
3817}
3818
Kevin May7d96b162021-02-03 17:38:41 +00003819TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr & model,
3820 size_t subgraphIndex,
3821 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003822{
3823 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3824
Derek Lambertiff05cc52019-04-26 13:05:17 +01003825 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3826 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003827
3828 size_t outputCount = operatorPtr->outputs.size();
3829 TensorRawPtrVector result(outputCount);
3830 for (size_t i=0; i<outputCount; ++i)
3831 {
3832 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
3833 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003834 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003835 }
3836 return result;
3837}
3838
Kevin May7d96b162021-02-03 17:38:41 +00003839TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr & model,
3840 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003841{
3842 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003843 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003844
Derek Lambertiff05cc52019-04-26 13:05:17 +01003845 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003846 TensorIdRawPtrVector result(inputCount);
3847 for (size_t i=0; i<inputCount; ++i)
3848 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003849 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01003850 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003851 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003852 }
3853 return result;
3854}
3855
Kevin May7d96b162021-02-03 17:38:41 +00003856TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr & model,
3857 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003858{
3859 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003860 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003861
Derek Lambertiff05cc52019-04-26 13:05:17 +01003862 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003863 TensorIdRawPtrVector result(outputCount);
3864 for (size_t i=0; i<outputCount; ++i)
3865 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003866 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3867 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003868 }
3869 return result;
3870}
3871
Kevin May7d96b162021-02-03 17:38:41 +00003872std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
3873 size_t subgraphIndex,
3874 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003875{
3876 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003877 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3878 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003879 return operatorPtr->inputs;
3880}
3881
Kevin May7d96b162021-02-03 17:38:41 +00003882std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
3883 size_t subgraphIndex,
3884 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003885{
3886 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003887 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3888 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003889 return operatorPtr->outputs;
3890}
3891
Kevin May7d96b162021-02-03 17:38:41 +00003892void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
3893 size_t operatorIndex,
3894 IConnectableLayer* layer,
Finn Williamsd4fa5452021-03-01 12:31:41 +00003895 const std::vector<unsigned int>& tensorIndexes,
3896 unsigned int startingSlotIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003897{
3898 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003899 ARMNN_ASSERT(layer != nullptr);
Matthew Sloyan81beae32021-07-13 19:46:11 +01003900
Finn Williamsd4fa5452021-03-01 12:31:41 +00003901 if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
telsoa01c577f2c2018-08-31 09:22:23 +01003902 {
3903 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003904 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
3905 " for subgraph:{} operator index:{} {}",
3906 tensorIndexes.size(),
3907 layer->GetNumInputSlots(),
3908 subgraphIndex,
3909 operatorIndex,
3910 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003911 }
3912
Finn Williamsd4fa5452021-03-01 12:31:41 +00003913 for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
telsoa01c577f2c2018-08-31 09:22:23 +01003914 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00003915 unsigned int tensorIndex = tensorIndexes[index];
3916 armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
telsoa01c577f2c2018-08-31 09:22:23 +01003917 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
3918 }
3919}
3920
Kevin May7d96b162021-02-03 17:38:41 +00003921void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
3922 size_t operatorIndex,
3923 IConnectableLayer* layer,
3924 const std::vector<unsigned int>& tensorIndexes)
telsoa01c577f2c2018-08-31 09:22:23 +01003925{
3926 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003927 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003928 if (tensorIndexes.size() != layer->GetNumOutputSlots())
3929 {
3930 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003931 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
3932 " for subgraph:{} operator index:{} {}",
3933 tensorIndexes.size(),
3934 layer->GetNumOutputSlots(),
3935 subgraphIndex,
3936 operatorIndex,
3937 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003938 }
3939
3940 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
3941 {
3942 unsigned int tensorIndex = tensorIndexes[slotIndex];
3943 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
3944 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3945 }
3946}
3947
Kevin May7d96b162021-02-03 17:38:41 +00003948void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003949{
3950 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3951
3952 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
3953 for (auto const & tensorIdAndPtr : inputs)
3954 {
3955 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3956 IConnectableLayer* layer =
3957 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3958
3959 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
3960 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3961
3962 RegisterOutputSlots(subgraphIndex,
3963 VIRTUAL_OPERATOR_ID,
3964 layer,
3965 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3966 }
3967}
3968
Kevin May7d96b162021-02-03 17:38:41 +00003969void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003970{
3971 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3972
3973 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
3974 for (auto const & tensorIdAndPtr : outputs)
3975 {
3976 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3977 IConnectableLayer* layer =
3978 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3979
3980 RegisterInputSlots(subgraphIndex,
3981 VIRTUAL_OPERATOR_ID,
3982 layer,
3983 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3984 }
3985}
3986
Kevin May7d96b162021-02-03 17:38:41 +00003987void TfLiteParserImpl::SetupConstantLayers(size_t subgraphIndex)
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003988{
3989 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3990
Derek Lambertiff05cc52019-04-26 13:05:17 +01003991 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003992 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
3993 {
3994 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
3995 {
3996 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
3997 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
3998 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003999 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02004000
Matthew Sloyan81beae32021-07-13 19:46:11 +01004001 if(IsConstTensor(tensorPtr))
4002 {
4003 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
4004 auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02004005
Matthew Sloyan81beae32021-07-13 19:46:11 +01004006 std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
4007 IConnectableLayer *layer = m_Network->AddConstantLayer(tensorAndData, layerName.c_str());
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02004008
Matthew Sloyan81beae32021-07-13 19:46:11 +01004009 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
4010 RegisterOutputSlots(subgraphIndex,
4011 VIRTUAL_OPERATOR_ID,
4012 layer,
4013 { tensorIndex });
4014 }
4015 else
4016 {
4017 throw ParseException(
4018 fmt::format("Invalid Tensor: Tensor should be constant. {}",
4019 CHECK_LOCATION().AsString()));
4020 }
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02004021 }
4022 }
4023 }
4024}
4025
telsoa01c577f2c2018-08-31 09:22:23 +01004026// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Kevin May7d96b162021-02-03 17:38:41 +00004027TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004028{
4029 CHECK_BUFFER(model, bufferIndex);
4030 return model->buffers[bufferIndex].get();
4031}
4032
Matteo Martincigh747ef822018-12-18 09:26:39 +00004033template<typename T>
Kevin May7d96b162021-02-03 17:38:41 +00004034std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
4035TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
4036 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00004037 armnn::TensorInfo& tensorInfo,
4038 armnn::Optional<armnn::PermutationVector&> permutationVector)
4039{
Matthew Sloyan81beae32021-07-13 19:46:11 +01004040 // Make sure isConstant flag is set.
4041 tensorInfo.SetConstant();
4042
Matteo Martincigh747ef822018-12-18 09:26:39 +00004043 auto constData = CreateConstTensorImpl<T>(bufferPtr,
4044 tensorPtr,
4045 tensorInfo,
4046 permutationVector);
Kevin May7d96b162021-02-03 17:38:41 +00004047 TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
Matteo Martincigh747ef822018-12-18 09:26:39 +00004048 return std::make_pair(constData.first, std::move(storage));
4049}
4050
Finn Williamsd4fa5452021-03-01 12:31:41 +00004051bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
4052{
4053 CHECK_TENSOR_PTR(tensorPtr);
mathad01bf7edb62021-04-20 16:12:45 +01004054 bool isConst = true;
4055
4056 auto buffer = GetBuffer(m_Model, tensorPtr->buffer);
4057 if (buffer->data.size() == 0)
4058 {
4059 isConst = false;
4060 }
4061
4062 return isConst;
Finn Williamsd4fa5452021-03-01 12:31:41 +00004063}
4064
Kevin May7d96b162021-02-03 17:38:41 +00004065std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
Finn Williamsd4fa5452021-03-01 12:31:41 +00004066TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
4067 armnn::TensorInfo& tensorInfo,
4068 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01004069{
4070 CHECK_TENSOR_PTR(tensorPtr);
4071 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
4072 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
4073
Matthew Sloyan81beae32021-07-13 19:46:11 +01004074 // Make sure isConstant flag is set.
4075 tensorInfo.SetConstant();
4076
telsoa01c577f2c2018-08-31 09:22:23 +01004077 switch (tensorInfo.GetDataType())
4078 {
4079 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00004080 return CreateConstTensorAndStoreData<float>(bufferPtr,
4081 tensorPtr,
4082 tensorInfo,
4083 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00004084 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00004085 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
4086 tensorPtr,
4087 tensorInfo,
4088 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00004089 case armnn::DataType::QSymmS8:
4090 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
4091 tensorPtr,
4092 tensorInfo,
4093 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00004094 case armnn::DataType::QAsymmS8:
4095 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
4096 tensorPtr,
4097 tensorInfo,
4098 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01004099 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00004100 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
4101 tensorPtr,
4102 tensorInfo,
4103 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01004104 default:
4105 {
4106 std::stringstream errString;
4107 errString << "Unexpected datatype when creating const tensor: "
4108 << armnn::GetDataTypeName(tensorInfo.GetDataType())
4109 << " shape:" << tensorInfo.GetShape()
4110 << CHECK_LOCATION().AsString();
4111 throw ParseException(errString.str());
4112 }
4113 }
4114}
4115
Finn Williamsd4fa5452021-03-01 12:31:41 +00004116armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
4117 armnn::TensorInfo& tensorInfo)
4118{
4119 CHECK_TENSOR_PTR(tensorPtr);
4120 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
4121 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
4122
Matthew Sloyan81beae32021-07-13 19:46:11 +01004123 // Make sure isConstant flag is set.
4124 tensorInfo.SetConstant();
4125
Finn Williamsd4fa5452021-03-01 12:31:41 +00004126 return ConstTensor(tensorInfo, bufferPtr->data.data());
4127}
4128
Kevin May7d96b162021-02-03 17:38:41 +00004129BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
4130 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01004131{
4132 CHECK_SUBGRAPH(m_Model, subgraphId);
4133 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
4134 for (auto const & input : inputs)
4135 {
4136 if (input.second->name == name)
4137 {
4138 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
4139 return std::make_pair(bindingId, ToTensorInfo(input.second));
4140 }
4141 }
4142
4143 std::stringstream bindings;
4144 for (auto const & input : inputs)
4145 {
4146 bindings << "'" << input.second->name << "' ";
4147 }
4148
4149 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004150 fmt::format("No input binding found for subgraph:{} and name:{}. "
4151 "Possible inputs are: [{}] {}",
4152 subgraphId,
4153 name,
4154 bindings.str(),
4155 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004156}
4157
Kevin May7d96b162021-02-03 17:38:41 +00004158BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
4159 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01004160{
4161 CHECK_SUBGRAPH(m_Model, subgraphId);
4162 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00004163 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004164 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00004165 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01004166 if (output.second->name == name)
4167 {
4168 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00004169 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
4170 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
4171 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01004172 }
4173 }
4174
4175 std::stringstream bindings;
4176 for (auto const & output : outputs)
4177 {
4178 bindings << "'" << output.second->name << "' ";
4179 }
4180
4181 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004182 fmt::format("No output binding found for subgraph:{} and name:{}. "
4183 "Possible outputs are: [{}] {}",
4184 subgraphId,
4185 name,
4186 bindings.str(),
4187 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004188}
4189
Kevin May7d96b162021-02-03 17:38:41 +00004190size_t TfLiteParserImpl::GetSubgraphCount() const
telsoa01c577f2c2018-08-31 09:22:23 +01004191{
4192 return m_Model->subgraphs.size();
4193}
4194
Kevin May7d96b162021-02-03 17:38:41 +00004195std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01004196{
4197 CHECK_SUBGRAPH(m_Model, subgraphId);
4198 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
4199 std::vector<std::string> result;
4200 result.reserve(inputs.size());
4201 for (auto const & input : inputs)
4202 {
4203 result.push_back(input.second->name);
4204 }
4205 return result;
4206}
4207
Kevin May7d96b162021-02-03 17:38:41 +00004208std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01004209{
4210 CHECK_SUBGRAPH(m_Model, subgraphId);
4211 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
4212 std::vector<std::string> result;
4213 result.reserve(outputs.size());
4214 for (auto const & output : outputs)
4215 {
4216 result.push_back(output.second->name);
4217 }
4218 return result;
4219}
4220
Matthew Sloyanac001ee2021-02-03 10:43:04 +00004221const std::string TfLiteParserImpl::GetVersion()
4222{
4223 return TFLITE_PARSER_VERSION;
4224}
4225
Kevin May7d96b162021-02-03 17:38:41 +00004226TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
telsoa01c577f2c2018-08-31 09:22:23 +01004227: m_FloatData(std::move(data))
4228, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00004229, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01004230, m_Int32Data(nullptr)
4231{
4232}
4233
Kevin May7d96b162021-02-03 17:38:41 +00004234TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
telsoa01c577f2c2018-08-31 09:22:23 +01004235: m_FloatData(nullptr)
4236, m_Uint8Data(std::move(data))
Keith Davisd305e1a2020-01-22 11:57:54 +00004237, m_Int8Data(nullptr)
4238, m_Int32Data(nullptr)
4239{
4240}
4241
Kevin May7d96b162021-02-03 17:38:41 +00004242TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
Keith Davisd305e1a2020-01-22 11:57:54 +00004243: m_FloatData(nullptr)
4244, m_Uint8Data(nullptr)
4245, m_Int8Data(std::move(data))
telsoa01c577f2c2018-08-31 09:22:23 +01004246, m_Int32Data(nullptr)
4247{
4248}
4249
Kevin May7d96b162021-02-03 17:38:41 +00004250TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
telsoa01c577f2c2018-08-31 09:22:23 +01004251: m_FloatData(nullptr)
4252, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00004253, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01004254, m_Int32Data(std::move(data))
4255{
4256}
4257
4258} // armnnTfLiteParser