blob: 3f4f0d811f56eabc1535c72bff2a2789bc9d6e56 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kelly04d82292023-01-19 18:29:40 +00002// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfLiteParser/Version.hpp"
Mike Kelly5880b912022-01-28 16:18:54 +00009#include "armnn/LstmParams.hpp"
Matthew Sloyanac001ee2021-02-03 10:43:04 +000010
Sadik Armagand109a4d2020-07-28 10:42:13 +010011#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +000012#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010013#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000014#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010015#include <armnn/Tensor.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000016#include <armnnUtils/TensorUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010017#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010018#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000019#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010020#include <armnn/utility/NumericCast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010021
22// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000023#include <armnnUtils/Permute.hpp>
Rob Hughes9542f902021-07-14 09:48:54 +010024#include <armnnUtils/Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000025
Sadik Armagan479045b2018-10-01 11:51:37 +010026#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010027#include <VerificationHelpers.hpp>
28
29// The generated code based on the Tf Lite schema:
30#include <schema_generated.h>
31
Matteo Martincighe011d202019-11-28 11:35:47 +000032#include <flatbuffers/flexbuffers.h>
33
James Ward58dec6b2020-09-11 17:32:44 +010034#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010035
telsoa01c577f2c2018-08-31 09:22:23 +010036#include <algorithm>
Matthew Sloyanac001ee2021-02-03 10:43:04 +000037#include <iostream>
telsoa01c577f2c2018-08-31 09:22:23 +010038#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010039#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000040
// Throws armnn::ParseException built from a stream-style message (msg may use
// operator<< chaining), with the call-site location appended. The braces make
// the macro expand to a single block statement.
#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
    { \
        throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
                   << ": " \
                   << CHECK_LOCATION().AsString()).str()); \
    }
telsoa01c577f2c2018-08-31 09:22:23 +010047
48using namespace armnn;
49using armnn::CheckLocation;
50namespace armnnTfLiteParser
51{
Kevin May7d96b162021-02-03 17:38:41 +000052
53ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
54 pTfLiteParserImpl(new TfLiteParserImpl(options)) {}
55
56ITfLiteParser::~ITfLiteParser() = default;
57
58ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
59{
60 return new ITfLiteParser(options);
61}
62
63ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
64{
65 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
66}
67
68void ITfLiteParser::Destroy(ITfLiteParser* parser)
69{
70 delete parser;
71}
72
73armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
74{
75 return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
76}
77
Mike Kelly0d77ae12022-01-07 17:42:27 +000078armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
Kevin May7d96b162021-02-03 17:38:41 +000079{
80 return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
81}
82
83BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
84 const std::string& name) const
85{
86 return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
87}
88
89BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
90 const std::string& name) const
91{
92 return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
93}
94
95size_t ITfLiteParser::GetSubgraphCount() const
96{
97 return pTfLiteParserImpl->GetSubgraphCount();
98}
99
100std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
101{
102 return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
103}
104
105std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
106{
107 return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
108}
109
namespace
{

// Sentinel operator index accepted by CheckModel() for checks that are not
// tied to a real operator (e.g. validating graph-level inputs/outputs).
const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
114
Mike Kelly0d77ae12022-01-07 17:42:27 +0000115void CheckSubgraph(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100116 size_t subgraphIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000117 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100118{
119 if (model.get() == nullptr)
120 {
121 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100122 fmt::format("{} was called with invalid (null) model. "
123 "Possible reason is that the model is not yet loaded and Unpack(ed). "
124 "subgraph:{} at {}",
125 location.m_Function,
126 subgraphIndex,
127 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100128 }
129 else if (subgraphIndex >= model->subgraphs.size())
130 {
131 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100132 fmt::format("{} was called with an invalid subgraph index. "
133 "subgraph:{} at {}",
134 location.m_Function,
135 subgraphIndex,
136 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100137 }
138}
139
// Convenience wrapper stamping the current source location into CheckSubgraph().
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
142
Mike Kelly0d77ae12022-01-07 17:42:27 +0000143void CheckModel(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100144 size_t subgraphIndex,
145 size_t operatorIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000146 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100147{
148 if (model.get() == nullptr)
149 {
150 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100151 fmt::format("{} was called with invalid (null) model. "
152 "Possible reason is that the model is not yet loaded and Unpack(ed). "
153 "subgraph:{} operator:{} at {}",
154 location.m_Function,
155 subgraphIndex,
156 operatorIndex,
157 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100158 }
159 else if (subgraphIndex >= model->subgraphs.size())
160 {
161 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100162 fmt::format("{} was called with an invalid subgraph index. "
163 "subgraph:{} operator:{} at {}",
164 location.m_Function,
165 subgraphIndex,
166 operatorIndex,
167 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100168 }
169 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
170 operatorIndex != VIRTUAL_OPERATOR_ID)
171 {
172 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100173 fmt::format("{} was called with an invalid operator index. "
174 "subgraph:{} operator:{} at {}",
175 location.m_Function,
176 subgraphIndex,
177 operatorIndex,
178 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100179 }
180}
181
// Convenience wrapper stamping the current source location into CheckModel().
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
184
Mike Kelly0d77ae12022-01-07 17:42:27 +0000185void CheckTensor(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100186 size_t subgraphIndex,
187 size_t tensorIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000188 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100189{
telsoa01c577f2c2018-08-31 09:22:23 +0100190 // the tensor index is the only one to check here
191 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
192 {
193 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100194 fmt::format("{} was called with an invalid tensor index. "
195 "subgraph:{} tensor:{} at {}",
196 location.m_Function,
197 subgraphIndex,
198 tensorIndex,
199 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100200 }
201}
202
// Convenience wrapper stamping the current source location into CheckTensor().
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
205
Kevin May7d96b162021-02-03 17:38:41 +0000206void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000207 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100208{
209 if (rawPtr == nullptr)
210 {
211 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100212 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100213 }
214}
215
// Convenience wrapper stamping the current source location into CheckTensorPtr().
#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
218
Mike Kelly0d77ae12022-01-07 17:42:27 +0000219void CheckBuffer(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100220 size_t bufferIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000221 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100222{
223 if (model.get() == nullptr)
224 {
225 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100226 fmt::format("{} was called with invalid (null) model. "
227 "Possible reason is that the model is not yet loaded and Unpack(ed). "
228 "buffer:{} at {}",
229 location.m_Function,
230 bufferIndex,
231 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100232 }
233 else if (bufferIndex >= model->buffers.size())
234 {
235 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100236 fmt::format("{} was called with an invalid buffer index. "
237 "buffer index:{} at {}",
238 location.m_Function,
239 bufferIndex,
240 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100241 }
242 else if (model->buffers[bufferIndex].get() == nullptr)
243 {
244 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100245 fmt::format("The buffer #{} is null. {}",
246 bufferIndex,
247 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100248 }
249}
250
// Convenience wrapper stamping the current source location into CheckBuffer().
#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
253
Kevin May7d96b162021-02-03 17:38:41 +0000254void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000255 const armnn::TensorInfo& tensorInfo,
telsoa01c577f2c2018-08-31 09:22:23 +0100256 uint32_t bufferId,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000257 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100258{
259 if (bufferPtr == nullptr)
260 {
261 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100262 fmt::format("BufferPtr is null for buffer:{}. {}",
263 bufferId,
264 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100265 }
266 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
267 tensorInfo.GetNumBytes() > bufferPtr->data.size())
268 {
269 std::stringstream ss;
270 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
271 << "For tensor: " << tensorInfo.GetShape()
272 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
273 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
274 throw ParseException(ss.str());
275 }
276}
277
Mike Kelly0d77ae12022-01-07 17:42:27 +0000278
// Returns the builtin operator code for the operator at
// (subgraphIndex, operatorIndex). Indices are assumed already validated.
tflite::BuiltinOperator GetOpCode(const TfLiteParserImpl::ModelPtr& model, size_t subgraphIndex, size_t operatorIndex)
{
    const auto& operatorPtr = model->subgraphs[subgraphIndex]->operators[operatorIndex];
    auto opcodeIndex = operatorPtr->opcode_index;

// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
#if defined(ARMNN_POST_TFLITE_2_3)
    // Newer schemas can carry the code in either field; taking the max resolves
    // both legacy (deprecated_builtin_code) and current (builtin_code) models.
    auto opcode = std::max(model->operator_codes[opcodeIndex]->builtin_code,
                           static_cast<tflite::BuiltinOperator>(model->operator_codes[opcodeIndex]->deprecated_builtin_code));
#else
    auto opcode = model->operator_codes[opcodeIndex]->builtin_code;
#endif
    return opcode;
}
293
294std::vector<unsigned int> GetUIntBuffer(armnn::TensorInfo info,
295 const TfLiteParserImpl::ModelPtr& model,
296 size_t bufferIndex)
297{
298 TfLiteParserImpl::BufferRawPtr bufferPtr = TfLiteParserImpl::GetBuffer(model, bufferIndex);
299 std::vector<unsigned int> buffer(info.GetNumElements());
300
301 if (info.GetDataType() == DataType::Signed32)
302 {
303 ::memcpy(buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
304 }
305 else if (info.GetDataType() == DataType::Signed64)
306 {
307 std::vector<uint64_t> uint64Buffer(info.GetNumElements());
308 ::memcpy(uint64Buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
309 buffer.assign(std::begin(uint64Buffer), std::end(uint64Buffer));
310 }
Mike Kelly0506ef02023-01-03 16:29:44 +0000311 else
312 {
313 CheckLocation location = CHECK_LOCATION();
314 throw ParseException(
315 fmt::format("Unsupported data type for uint buffer {}, only Signed 32 or Signed 64 are supported. {}",
316 GetDataTypeName(info.GetDataType()),
317 location.AsString()));
318 }
Mike Kelly0d77ae12022-01-07 17:42:27 +0000319 return buffer;
320}
321
// Convenience wrapper stamping the current source location into CheckBufferSize().
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
324
325bool IsActivationSupported(tflite::ActivationFunctionType activationType)
326{
327 switch(activationType)
328 {
329 case tflite::ActivationFunctionType_NONE:
330 case tflite::ActivationFunctionType_RELU:
331 case tflite::ActivationFunctionType_RELU6:
332 case tflite::ActivationFunctionType_TANH:
333 {
334 return true;
335 }
336 default:
337 {
338 return false;
339 }
340 }
341}
342
// Throws ParseException when OPTION carries a fused activation the parser
// cannot lower (see IsActivationSupported). The do/while(false) makes the
// macro behave as a single statement.
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                fmt::format("TfLite parser doesn't support fused activation: " \
                            "{}/{} in {} subgraph:{} operator:{} at {}", \
                            OPTION->fused_activation_function, \
                            tflite::EnumNameActivationFunctionType(\
                                OPTION->fused_activation_function), \
                            __func__, \
                            SUBGRAPH_INDEX, \
                            OPERATOR_INDEX, \
                            CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
359
360
Mike Kelly0d77ae12022-01-07 17:42:27 +0000361std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t>& in)
telsoa01c577f2c2018-08-31 09:22:23 +0100362{
363 std::vector<unsigned int> result;
364 result.reserve(in.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +0000365 for (auto& i : in)
telsoa01c577f2c2018-08-31 09:22:23 +0100366 {
mathad01c21025d2021-04-26 10:09:37 +0100367 // If the location of the input data is -1 then the input should be ignored.
368 if (i == -1)
369 {
370 continue;
371 }
telsoa01c577f2c2018-08-31 09:22:23 +0100372 result.push_back(CHECKED_NON_NEGATIVE(i));
373 }
374 return result;
375}
376
// TfLite encodes an omitted optional operand as a negative tensor index;
// any non-negative index refers to a real tensor.
bool IsOptionalOperandPresent(int input)
{
    return input >= 0;
}
381
telsoa01c577f2c2018-08-31 09:22:23 +0100382void CalcPadding(uint32_t inputSize,
383 uint32_t filterSize,
384 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100385 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100386 uint32_t& paddingFront,
387 uint32_t& paddingBack,
388 tflite::Padding padding)
389{
390 paddingFront = 0;
391 paddingBack = 0;
392 if (padding == tflite::Padding_SAME)
393 {
394 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100395 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
396 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100397 if (temp > inputSize)
398 {
399 paddingFront = (temp - inputSize) / 2;
400 paddingBack = (temp - inputSize) - paddingFront;
401 }
402 }
403}
404
Teresa Charlin024ef0b2023-04-26 11:19:03 +0100405// Function that calculates explicit padding when the output shape is known.
406// At the moment the output is only given as an input parameter in Transpose Convolution,
407// not in Convolution and Depthwise Convolution
408void CalcPadding(uint32_t inputSize,
409 uint32_t filterSize,
410 uint32_t stride,
411 uint32_t dilation,
412 uint32_t& paddingFront,
413 uint32_t& paddingBack,
414 tflite::Padding padding,
415 uint32_t outputSize)
416{
417 IgnoreUnused(dilation);
418 paddingFront = 0;
419 paddingBack = 0;
420 if (padding == tflite::Padding_SAME)
421 {
422 uint32_t totalPadding = (inputSize - 1) * stride + filterSize - outputSize;
423 paddingFront = totalPadding / 2;
424 paddingBack = totalPadding - paddingFront;
425 }
426}
427
Kevin May7d96b162021-02-03 17:38:41 +0000428armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Finn Williamsb49ed182021-06-29 15:50:08 +0100429 const std::vector<unsigned int>& shape,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100430 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100431{
432 armnn::DataType type;
433 CHECK_TENSOR_PTR(tensorPtr);
434
435 switch (tensorPtr->type)
436 {
437 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000438 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100439 break;
440 case tflite::TensorType_FLOAT32:
441 type = armnn::DataType::Float32;
442 break;
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100443 case tflite::TensorType_FLOAT16:
444 type = armnn::DataType::Float16;
445 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000446 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000447 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000448 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000449 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000450 type = armnn::DataType::QAsymmS8;
451 }
452 else
453 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000454 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000455 type = armnn::DataType::QSymmS8;
456 }
Finn Williamsed66d142019-12-06 09:55:55 +0000457 break;
458 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000459 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000460 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100461 case tflite::TensorType_INT32:
462 type = armnn::DataType::Signed32;
463 break;
Inki Daed4619e22020-09-10 15:33:54 +0900464 case tflite::TensorType_INT64:
465 type = armnn::DataType::Signed64;
466 break;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100467 case tflite::TensorType_BOOL:
468 type = armnn::DataType::Boolean;
469 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100470 default:
471 {
472 CheckLocation location = CHECK_LOCATION();
473 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100474 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
475 tensorPtr->type,
476 tflite::EnumNameTensorType(tensorPtr->type),
477 tensorPtr->name,
478 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100479 }
480 }
Finn Williamsb49ed182021-06-29 15:50:08 +0100481 TensorShape tensorShape;
482
483 std::vector<unsigned int> safeShape = shape;
484 if (shape.size() == 0)
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100485 {
486 safeShape.push_back(1);
Finn Williamsb49ed182021-06-29 15:50:08 +0100487 }
488
489 if (!outputTensor)
490 {
491 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()), safeShape.data());
492 }
493 else
494 {
Rob Hughesd812a312021-08-06 13:10:53 +0100495 size_t shapeSignatureSize = tensorPtr->shape_signature.size();
Finn Williamsb49ed182021-06-29 15:50:08 +0100496
497 // If a shape signature exists we will use that to infer dynamic tensors
498 if (shapeSignatureSize != 0)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100499 {
Finn Williamsb49ed182021-06-29 15:50:08 +0100500 // If the shape is incompatible with the shape signature override the shape
501 if (shapeSignatureSize != shape.size())
502 {
503 safeShape = {};
504
505 for (unsigned int i = 0; i < shapeSignatureSize; ++i)
506 {
507 unsigned int dim = tensorPtr->shape_signature[i] > -1 ?
508 static_cast<unsigned int>(tensorPtr->shape_signature[i]) : 0;
509 safeShape.push_back(dim);
510 }
511 }
512
Rob Hughesd812a312021-08-06 13:10:53 +0100513 std::unique_ptr<bool[]> dimMask = std::make_unique<bool[]>(tensorPtr->shape_signature.size());
Mike Kelly04d82292023-01-19 18:29:40 +0000514 bool batchOnly = true;
Finn Williamsb49ed182021-06-29 15:50:08 +0100515 for (unsigned int i = 0; i < tensorPtr->shape_signature.size(); ++i)
516 {
Mike Kelly04d82292023-01-19 18:29:40 +0000517 dimMask[i] = tensorPtr->shape_signature[i] != -1;
518
519 if (i > 0 && !dimMask[i])
520 {
521 batchOnly = false;
522 }
523 }
524 if (batchOnly)
525 {
526 dimMask[0] = true;
Finn Williamsb49ed182021-06-29 15:50:08 +0100527 }
Rob Hughesd812a312021-08-06 13:10:53 +0100528 tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask.get());
Finn Williamsb49ed182021-06-29 15:50:08 +0100529 }
530 // If there is no shape signature treat the tensor as dynamic if the shape has a size of zero
531 else if (shape.size() == 0)
532 {
533 tensorShape = TensorShape(1, false);
534 }
535 else
536 {
537 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(shape.size()), shape.data());
Sadik Armagand109a4d2020-07-28 10:42:13 +0100538 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100539 }
540
Teresa Charlinacb3ec52023-04-03 19:57:00 +0100541 float quantizationScale = 1.0f;
Keith Davisd305e1a2020-01-22 11:57:54 +0000542 int32_t quantizationOffset = 0;
543
544 if (tensorPtr->quantization.get())
545 {
546 if (tensorPtr->quantization->scale.size() <= 1)
547 {
548 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
549 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
550
551 if (tensorPtr->quantization->scale.size() == 1)
552 {
553 quantizationScale = tensorPtr->quantization->scale[0];
554 }
555 if (tensorPtr->quantization->zero_point.size() == 1)
556 {
557 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000558 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100559 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000560 }
561
Sadik Armagand109a4d2020-07-28 10:42:13 +0100562 armnn::TensorInfo result(tensorShape,
563 type,
564 quantizationScale,
565 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000566 return result;
567 }
568 else
569 {
570 std::vector<float> quantizationScales;
571 std::vector<int32_t> quantizationOffsets;
572
573 // Scale
574 std::copy(tensorPtr->quantization->scale.begin(),
575 tensorPtr->quantization->scale.end(),
576 std::back_inserter(quantizationScales));
577
Keith Davis0c2eeac2020-02-11 16:51:50 +0000578 // QSymmS8 Per-axis
Sadik Armagand109a4d2020-07-28 10:42:13 +0100579 armnn::TensorInfo result(tensorShape,
580 type,
581 quantizationScales,
Jan Eilers7612bd62021-04-06 17:29:03 +0100582 armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
Keith Davisd305e1a2020-01-22 11:57:54 +0000583 return result;
584 }
585 }
586 else
587 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100588 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000589 type,
590 quantizationScale,
591 quantizationOffset);
592 return result;
593 }
telsoa01c577f2c2018-08-31 09:22:23 +0100594}
595
Kevin May7d96b162021-02-03 17:38:41 +0000596armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Mike Kelly377fb212023-01-10 15:55:28 +0000597 const bool outputTensor = false)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100598{
Mike Kelly0d77ae12022-01-07 17:42:27 +0000599 auto const& dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100600 return ToTensorInfo(tensorPtr, dimensions, outputTensor);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100601}
602
telsoa01c577f2c2018-08-31 09:22:23 +0100603template<typename T>
604std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
Kevin May7d96b162021-02-03 17:38:41 +0000605CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
606 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000607 armnn::TensorInfo& tensorInfo,
608 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100609{
Jan Eilers8eb25602020-03-09 12:13:48 +0000610 IgnoreUnused(tensorPtr);
Ryan OSheac229b3f2023-06-27 22:34:54 +0100611
612 if (!tensorPtr)
613 {
614 throw armnn::ParseException(fmt::format("Tensor pointer is null {}", CHECK_LOCATION().AsString()));
615 }
616
617 if (!bufferPtr)
618 {
619 throw armnn::ParseException(fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
620 }
telsoa01c577f2c2018-08-31 09:22:23 +0100621
622 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000623
624 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
625 {
626 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000627 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
628 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000629 }
630 else
631 {
632 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
633 }
634
Matthew Sloyan81beae32021-07-13 19:46:11 +0100635 // Make sure isConstant flag is set.
636 tensorInfo.SetConstant();
637
telsoa01c577f2c2018-08-31 09:22:23 +0100638 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
639}
640
telsoa01c577f2c2018-08-31 09:22:23 +0100641armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
642{
643 // generate the binding id by shifting the tensor id by 8 bit
644 // and add the subgraph id, which allows 256 subgraphs
645 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
646}
647
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000648bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
649{
650 const unsigned int actualSize = actual.GetNumDimensions();
651 if (actualSize != expected.size())
652 {
653 return false;
654 }
655
656 for (unsigned int i = 0u; i < actualSize; i++)
657 {
658 if (expected[i] < 0 ||
659 actual[i] != static_cast<unsigned int>(expected[i]))
660 {
661 return false;
662 }
663 }
664
665 return true;
666}
667
Cathal Corbett2b922e22022-09-23 15:49:24 +0100668bool CheckShape(const armnn::TensorShape& actual, const armnn::TensorShape& expected)
669{
670 std::vector<int32_t> expectedVec;
671 for (uint32_t i = 0; i < expected.GetNumDimensions(); i++)
672 {
673 expectedVec.push_back(expected[i]);
674 }
675 return CheckShape(actual, expectedVec);
676}
677
James Conroy05102392020-06-24 15:39:55 +0100678void CheckMatchingQuantization(const TensorInfo& first,
679 const TensorInfo& second,
680 const std::string& descName,
681 std::string const& firstName,
682 std::string const& secondName)
683{
684 if (!first.IsQuantized() ||
685 !second.IsQuantized())
686 {
687 // Not a quantized type, ignore the validation
688 return;
689 }
690
691 DataType firstDataType = first.GetDataType();
692 DataType secondDataType = second.GetDataType();
693
694 if (firstDataType != secondDataType)
695 {
696 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
697 " must be of the same quantized type, " +
698 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
699 secondName + " is " + GetDataTypeName(secondDataType));
700 }
701
702 if (!first.IsTypeSpaceMatch(second))
703 {
704 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
705 " must have the same quantization space, " +
706 firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
707 " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
708 secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
709 " and scale " + std::to_string(second.GetQuantizationScale()));
710 }
711}
712
Mike Kelly377fb212023-01-10 15:55:28 +0000713bool IsDynamic(TfLiteParserImpl::TensorRawPtr tensorPtr)
714{
715 auto shape = tensorPtr->shape;
716
717 if (shape.empty())
718 {
719 return true;
720 }
721 auto shapeSig = tensorPtr->shape_signature;
722
723 if (shapeSig.empty())
724 {
725 return false;
726 }
727
728 for (unsigned int i = 0; i < shapeSig.size() ; ++i)
729 {
730 if (shapeSig[i] == -1)
731 {
732 return true;
733 }
734 }
735 return false;
736}
737
telsoa01c577f2c2018-08-31 09:22:23 +0100738} // <anonymous>
739
Kevin May7d96b162021-02-03 17:38:41 +0000740TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100741: m_Options(options)
742, m_Network(nullptr, nullptr)
Kevin May7d96b162021-02-03 17:38:41 +0000743, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
    // Register one parse handler per supported TfLite builtin operator.
    // CreateNetworkFromModel() dispatches through this table on the operator's
    // builtin code; builtin codes with no entry here end up in
    // ParseUnsupportedOperator via the default-constructed map slot.
    m_ParserFunctions[tflite::BuiltinOperator_ABS]                     = &TfLiteParserImpl::ParseAbs;
    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParserImpl::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN]                 = &TfLiteParserImpl::ParseArgMin;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX]                 = &TfLiteParserImpl::ParseArgMax;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParserImpl::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParserImpl::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_MATMUL]            = &TfLiteParserImpl::ParseBatchMatMul;
    m_ParserFunctions[tflite::BuiltinOperator_BROADCAST_TO]            = &TfLiteParserImpl::ParseBroadcastTo;
    m_ParserFunctions[tflite::BuiltinOperator_CEIL]                    = &TfLiteParserImpl::ParseCeil;
    m_ParserFunctions[tflite::BuiltinOperator_CAST]                    = &TfLiteParserImpl::ParseCast;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParserImpl::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParserImpl::ParseConv2D;
    // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
    #if defined(ARMNN_POST_TFLITE_2_4)
    m_ParserFunctions[tflite::BuiltinOperator_CONV_3D]                 = &TfLiteParserImpl::ParseConv3D;
    #endif
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParserImpl::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE]          = &TfLiteParserImpl::ParseDepthToSpace;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParserImpl::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParserImpl::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParserImpl::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ELU]                     = &TfLiteParserImpl::ParseElu;
    m_ParserFunctions[tflite::BuiltinOperator_EQUAL]                   = &TfLiteParserImpl::ParseEqual;
    m_ParserFunctions[tflite::BuiltinOperator_EXP]                     = &TfLiteParserImpl::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS]             = &TfLiteParserImpl::ParseExpandDims;
    m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV]               = &TfLiteParserImpl::ParseFloorDiv;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParserImpl::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER]                  = &TfLiteParserImpl::ParseGather;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER_ND]               = &TfLiteParserImpl::ParseGatherNd;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER]                 = &TfLiteParserImpl::ParseGreater;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL]           = &TfLiteParserImpl::ParseGreaterOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH]              = &TfLiteParserImpl::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParserImpl::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LESS]                    = &TfLiteParserImpl::ParseLess;
    m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL]              = &TfLiteParserImpl::ParseLessOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
            = &TfLiteParserImpl::ParseLocalResponseNormalization;
    m_ParserFunctions[tflite::BuiltinOperator_LOG]                     = &TfLiteParserImpl::ParseLog;
    m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT]             = &TfLiteParserImpl::ParseLogicalNot;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParserImpl::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_LOG_SOFTMAX]             = &TfLiteParserImpl::ParseLogSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParserImpl::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParserImpl::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParserImpl::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParserImpl::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParserImpl::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD]              = &TfLiteParserImpl::ParseMirrorPad;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParserImpl::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParserImpl::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL]               = &TfLiteParserImpl::ParseNotEqual;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParserImpl::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParserImpl::ParsePad;
    // PADV2 shares the PAD handler; the constant-values input is dealt with there.
    m_ParserFunctions[tflite::BuiltinOperator_PADV2]                   = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_POW]                     = &TfLiteParserImpl::ParsePower;
    m_ParserFunctions[tflite::BuiltinOperator_PRELU]                   = &TfLiteParserImpl::ParsePrelu;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParserImpl::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParserImpl::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParserImpl::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX]              = &TfLiteParserImpl::ParseReduceMax;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN]              = &TfLiteParserImpl::ParseReduceMin;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD]             = &TfLiteParserImpl::ParseReduceProd;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParserImpl::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParserImpl::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_REVERSE_V2]              = &TfLiteParserImpl::ParseReverseV2;
    m_ParserFunctions[tflite::BuiltinOperator_RSQRT]                   = &TfLiteParserImpl::ParseRsqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SQRT]                    = &TfLiteParserImpl::ParseSqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SHAPE]                   = &TfLiteParserImpl::ParseShape;
    m_ParserFunctions[tflite::BuiltinOperator_SIN]                     = &TfLiteParserImpl::ParseSin;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParserImpl::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParserImpl::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParserImpl::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_DEPTH]          = &TfLiteParserImpl::ParseSpaceToDepth;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParserImpl::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V]                 = &TfLiteParserImpl::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParserImpl::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_SQUARE]                  = &TfLiteParserImpl::ParseSquare;
    m_ParserFunctions[tflite::BuiltinOperator_SQUARED_DIFFERENCE]      = &TfLiteParserImpl::ParseSquaredDifference;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParserImpl::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParserImpl::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_SUM]                     = &TfLiteParserImpl::ParseSum;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParserImpl::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TILE]                    = &TfLiteParserImpl::ParseTile;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParserImpl::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParserImpl::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
            = &TfLiteParserImpl::ParseUnidirectionalSequenceLSTM;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParserImpl::ParseUnpack;

    // Register supported custom operators, keyed by the custom_code string in
    // the model's operator_codes table (see ParseCustomOperator).
    m_CustomParserFunctions["TFLite_Detection_PostProcess"]      = &TfLiteParserImpl::ParseDetectionPostProcess;
}
838
Mike Kelly377fb212023-01-10 15:55:28 +0000839armnn::TensorInfo TfLiteParserImpl::InputTensorInfo(size_t subgraphIndex,
840 size_t operatorIndex,
841 int input)
842{
843 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
844 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
845
846 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[input]);
847 auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);
848
849 if (search != m_TensorInfos.end())
850 {
851 return m_TensorInfos[inputId];
852 }
853 else
854 {
855 auto tensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
856 m_TensorInfos.insert({ inputId, tensorInfo });
857 return tensorInfo;
858 }
859}
860
861armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromInputs(size_t subgraphIndex,
862 size_t operatorIndex,
863 armnn::IConnectableLayer* layer,
864 int output,
865 std::vector<int> inputs)
866{
867 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
868 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
869
870 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
871
872 auto outputSearch = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(outputId);
873
874 if (outputSearch != m_TensorInfos.end())
875 {
876 return m_TensorInfos[outputId];
877 }
878
879 const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
880 TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);
881
882 if (IsDynamic(outputTensorPtr))
883 {
884 if (inputs.empty())
885 {
886 for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
887 {
888 inputs.emplace_back(i);
889 }
890 }
891 auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
892 std::vector<armnn::TensorShape> inputShapes;
893
894 for (unsigned int i = 0; i < inputs.size(); ++i)
895 {
896 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[inputs[i]]);
897 auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);
898
899 if (search != m_TensorInfos.end())
900 {
901 auto &inputTensorInfo = m_TensorInfos[inputId];
902 inputShapes.push_back(inputTensorInfo.GetShape());
903 }
904 else
905 {
Mike Kelly377fb212023-01-10 15:55:28 +0000906 auto inputTensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
907 m_TensorInfos.insert({ inputId, inputTensorInfo});
908 inputShapes.push_back(inputTensorInfo.GetShape());
909 }
910 }
911 const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
912 tensor.SetShape(outputShape);
913 }
914 m_TensorInfos.insert({ outputId, tensor});
915 return tensor;
916}
917
918armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromShapes(size_t subgraphIndex,
919 size_t operatorIndex,
920 armnn::IConnectableLayer* layer,
921 int output,
922 std::vector<armnn::TensorShape> inputShapes)
923{
924 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
925 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
926
927 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
928 const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
929 TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);
930
931 if (IsDynamic(outputTensorPtr))
932 {
933 const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
934 tensor.SetShape(outputShape);
935 }
936 m_TensorInfos.insert({ outputId, tensor});
937 return tensor;
938}
939
Kevin May7d96b162021-02-03 17:38:41 +0000940void TfLiteParserImpl::ResetParser()
telsoa01c577f2c2018-08-31 09:22:23 +0100941{
942 m_Network = armnn::INetworkPtr(nullptr, nullptr);
943 m_Model = nullptr;
944 m_SubgraphConnections.clear();
Mike Kelly377fb212023-01-10 15:55:28 +0000945 m_OverriddenOutputShapes.clear();
Mike Kelly5880b912022-01-28 16:18:54 +0000946 m_ConstantsToDequantize.clear();
947 m_ConstantsToBeCreated.clear();
Mike Kelly377fb212023-01-10 15:55:28 +0000948 m_TensorInfos.clear();
telsoa01c577f2c2018-08-31 09:22:23 +0100949}
950
/// Parses a TfLite flatbuffer file from disk and builds the corresponding
/// armnn::INetwork. Any state left over from a previous parse is discarded
/// first via ResetParser().
///
/// @param graphFile  Path to the .tflite file passed to LoadModelFromFile.
INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}
957
/// Parses an in-memory TfLite flatbuffer and builds the corresponding
/// armnn::INetwork. Any state left over from a previous parse is discarded
/// first via ResetParser().
///
/// @param binaryContent  Raw bytes of a .tflite flatbuffer.
INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
964
Finn Williamsb49ed182021-06-29 15:50:08 +0100965
/// Builds an armnn::INetwork from an already-deserialized TfLite model,
/// taking ownership of the ModelT instance. Any state left over from a
/// previous parse is discarded first via ResetParser().
armnn::INetworkPtr TfLiteParserImpl::LoadModel(std::unique_ptr<tflite::ModelT> model)
{
    ResetParser();
    m_Model = std::move(model);

    return CreateNetworkFromModel();
}
973
/// Core driver: walks every operator of the (single) subgraph in m_Model,
/// dispatches each one to its registered parse function, wires up all
/// layer-to-layer connections and returns ownership of the finished network.
///
/// @throws ParseException if the model pointer is null, if it contains more
///         than one subgraph, if an operator code is out of range, or if any
///         individual operator fails to parse (the original error is wrapped
///         with the operator/subgraph indices).
INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
{

    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
    // Translate parser options into backend options for INetwork::Create.
    if (m_Options)
    {
        if (m_Options.value().m_InferAndValidate)
        {
            BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                      {
                                                          { "InferAndValidate", true }
                                                      });

            networkOptions.push_back(shapeInferenceMethodOption);
        }
        if (m_Options.value().m_AllowExpandedDims)
        {
            BackendOptions shapeInferenceMethodOption("AllowExpandedDims",
                                                      {
                                                          { "AllowExpandedDims", true }
                                                      });

            networkOptions.push_back(shapeInferenceMethodOption);
        }
    }
    m_Network = INetwork::Create(networkOptions);

    if (m_Model.get() == nullptr)
    {
        throw ParseException(fmt::format("Tflite Model pointer is null {}", CHECK_LOCATION().AsString()));
    }

    // Multi-subgraph models are not supported by this parser.
    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
                        m_Model->subgraphs.size(),
                        CHECK_LOCATION().AsString()));
    }

    // Indices kept outside the try-block so the catch handler can report
    // exactly which operator failed.
    size_t subgraphIndex = 0;
    size_t operatorIndex = 0;
    try
    {
        for (SubgraphPtr const& subgraph : m_Model->subgraphs)
        {
            // Seed the tensor-info cache before parsing any operators.
            SetupInputLayerTensorInfos(subgraphIndex);
            SetupConstantLayerTensorInfos(subgraphIndex);

            m_SubgraphConnections.emplace_back(subgraph->tensors.size());
            for (OperatorPtr const& op : subgraph->operators)
            {
                auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];

// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
#if defined(ARMNN_POST_TFLITE_2_3)
                auto builtinCode = std::max(opCodePtr->builtin_code,
                        static_cast<tflite::BuiltinOperator>(opCodePtr->deprecated_builtin_code));
#else
                auto builtinCode = opCodePtr->builtin_code;
#endif

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
                                                     "subgraph:{} operator idx:{}. {}",
                                                     builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
                                                     operatorIndex, CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto& parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
                ++operatorIndex;
            }

            // Create the graph's boundary and constant layers once all
            // operators of this subgraph have been parsed.
            SetupInputLayers(subgraphIndex);
            SetupOutputLayers(subgraphIndex);
            SetupConstantLayers(subgraphIndex);

            ++subgraphIndex;
            operatorIndex = 0;
        }
    }
    catch (const ParseException& e)
    {
        // Re-throw with the failing operator/subgraph indices attached.
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }
    return std::move(m_Network);
}
1089
Mike Kelly0506ef02023-01-03 16:29:44 +00001090bool TfLiteParserImpl::ShouldConstantTensorBeConverted(TfLiteParserImpl::TensorRawPtr tensorPtr,
1091 armnn::DataType inputDataType,
1092 armnn::DataType tensorDataType)
Mike Kelly5880b912022-01-28 16:18:54 +00001093{
Mike Kelly0506ef02023-01-03 16:29:44 +00001094 return (TfLiteParserImpl::IsConstTensor(tensorPtr) && inputDataType == DataType::Float32 &&
1095 (tensorDataType == DataType::QAsymmU8 ||
1096 tensorDataType == DataType::QAsymmS8 ||
1097 tensorDataType == DataType::QSymmS8 ||
1098 tensorDataType == DataType::Signed32 ||
1099 tensorDataType == DataType::Signed64));
Mike Kelly5880b912022-01-28 16:18:54 +00001100}
1101
Kevin May7d96b162021-02-03 17:38:41 +00001102void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
1103 size_t tensorIndex,
1104 armnn::IOutputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +01001105{
1106 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001107
1108 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
1109
Nikhil Rajd4d1c312022-08-03 18:20:59 +01001110 if (slot->GetOwningIConnectableLayer().GetType() != LayerType::Constant)
telsoa01c577f2c2018-08-31 09:22:23 +01001111 {
telsoa01c577f2c2018-08-31 09:22:23 +01001112
Nikhil Rajd4d1c312022-08-03 18:20:59 +01001113 // assuming there is only one producer for that tensor
1114 if (tensorSlots.outputSlot != nullptr)
1115 {
1116 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
1117 "subgraph:{} tensor:{} {}",
1118 subgraphIndex,
1119 tensorIndex,
1120 CHECK_LOCATION().AsString()));
1121 }
1122 }
telsoa01c577f2c2018-08-31 09:22:23 +01001123 tensorSlots.outputSlot = slot;
1124}
1125
Kevin May7d96b162021-02-03 17:38:41 +00001126void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
1127 size_t tensorIndex,
1128 armnn::IInputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +01001129{
1130 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001131
Finn Williamsd4fa5452021-03-01 12:31:41 +00001132 TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01001133 tensorSlots.inputSlots.push_back(slot);
1134}
1135
Kevin May7d96b162021-02-03 17:38:41 +00001136void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001137{
1138 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1139
1140 // NOTE: By default we presume the custom operator is not supported
Kevin May7d96b162021-02-03 17:38:41 +00001141 auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001142
1143 // Identify custom code defined for custom operator
1144 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1145 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
1146
Mike Kelly377fb212023-01-10 15:55:28 +00001147 // Find parser function that corresponds to custom code (if any)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001148 auto iterator = m_CustomParserFunctions.find(customCode);
1149 if (iterator != m_CustomParserFunctions.end())
1150 {
1151 customParserFunction = iterator->second;
1152 }
1153
1154 // Run parser function
1155 (this->*customParserFunction)(subgraphIndex, operatorIndex);
1156}
1157
Kevin May7d96b162021-02-03 17:38:41 +00001158void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001159{
1160 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001161
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001162 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1163
1164 auto opcodeIndex = operatorPtr->opcode_index;
Jim Flynnfca233e2021-09-23 12:16:53 +01001165
1166// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
Matthew Sloyan4d217c02021-10-07 11:48:58 +01001167#if defined(ARMNN_POST_TFLITE_2_3)
Jim Flynnfca233e2021-09-23 12:16:53 +01001168 auto opcode = std::max(m_Model->operator_codes[opcodeIndex]->builtin_code,
1169 static_cast<tflite::BuiltinOperator>(m_Model->operator_codes[opcodeIndex]->deprecated_builtin_code));
1170#else
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001171 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
Jim Flynnfca233e2021-09-23 12:16:53 +01001172#endif
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001173
1174 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
1175 {
1176 // Do not add StandInLayer, throw ParseException instead
1177 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001178 fmt::format("Operator not supported. "
1179 "subgraph:{} operator:{} "
1180 "opcode_index:{} opcode:{} / {} {}",
1181 subgraphIndex,
1182 operatorIndex,
1183 opcodeIndex,
1184 opcode,
1185 tflite::EnumNameBuiltinOperator(opcode),
1186 CHECK_LOCATION().AsString()));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001187 }
1188
1189 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1190 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1191
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001192 const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
1193 const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001194
1195 StandInDescriptor descriptor(numInputs, numOutputs);
James Ward58dec6b2020-09-11 17:32:44 +01001196 auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001197
1198 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
1199 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001200
1201 if (!layer)
1202 {
1203 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1204 operatorIndex, CHECK_LOCATION().AsString()));
1205 }
James Conroy05102392020-06-24 15:39:55 +01001206
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001207 for (unsigned int i = 0u; i < numOutputs; ++i)
1208 {
Mike Kelly04d82292023-01-19 18:29:40 +00001209 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[0], true));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001210 }
1211
1212 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1213 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1214
1215 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
1216 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +01001217}
1218
mathad01b392e982021-04-07 12:07:30 +01001219void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
1220{
1221 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1222
1223 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1224 CHECK_VALID_SIZE(inputs.size(), 1);
1225 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1226 CHECK_VALID_SIZE(outputs.size(), 1);
1227
1228 auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
1229
1230 IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001231
1232 if (!layer)
1233 {
1234 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1235 operatorIndex, CHECK_LOCATION().AsString()));
1236 }
mathad01b392e982021-04-07 12:07:30 +01001237
Mike Kelly377fb212023-01-10 15:55:28 +00001238 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
mathad01b392e982021-04-07 12:07:30 +01001239 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1240
1241 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1242 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1243
1244 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1245 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1246}
1247
Kevin May7d96b162021-02-03 17:38:41 +00001248void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001249{
1250 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1251
Mike Kelly0d77ae12022-01-07 17:42:27 +00001252 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1253 const auto* options = operatorPtr->builtin_options.AsConv2DOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01001254
1255 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1256
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001257 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1258 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1259 CHECK_VALID_SIZE(outputs.size(), 1);
1260
telsoa01c577f2c2018-08-31 09:22:23 +01001261 Convolution2dDescriptor desc;
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001262 inputs.size() == 3 ?
1263 desc.m_BiasEnabled = true : desc.m_BiasEnabled = false;
telsoa01c577f2c2018-08-31 09:22:23 +01001264 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1265 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +00001266 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +01001267 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1268 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +00001269
Mike Kelly377fb212023-01-10 15:55:28 +00001270 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1271 armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001272
1273 // assuming input is NHWC
1274 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001275 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001276
1277 // assuming the filter is OHWI : Output, H, W, Input
1278 // which is essentially the same as NHWC
1279 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001280 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001281
Pablo Tellof0bd6832019-04-26 17:58:13 +01001282 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1283 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1284 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1285 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +01001286
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001287 // Add the first input and weights tensor to the registration list.
1288 // The constant weights will be added by SetupConstantLayers.
1289 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1290 std::vector<unsigned int> tensorIndexesToRegister = { inputTensorIndexes[0], inputTensorIndexes[1] };
telsoa01c577f2c2018-08-31 09:22:23 +01001291
James Ward58dec6b2020-09-11 17:32:44 +01001292 auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001293 armnn::IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, layerName.c_str());
telsoa01c577f2c2018-08-31 09:22:23 +01001294
Mike Kelly0506ef02023-01-03 16:29:44 +00001295 if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
telsoa01c577f2c2018-08-31 09:22:23 +01001296 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001297 m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
telsoa01c577f2c2018-08-31 09:22:23 +01001298 }
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001299
1300 if (desc.m_BiasEnabled)
telsoa01c577f2c2018-08-31 09:22:23 +01001301 {
Mike Kelly377fb212023-01-10 15:55:28 +00001302 armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001303
1304 // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
1305 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
1306
Mike Kelly0506ef02023-01-03 16:29:44 +00001307 if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001308 {
1309 m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
1310 }
telsoa01c577f2c2018-08-31 09:22:23 +01001311 }
1312
Ryan OSheac229b3f2023-06-27 22:34:54 +01001313 if (!layer)
1314 {
1315 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1316 operatorIndex, CHECK_LOCATION().AsString()));
1317 }
telsoa01c577f2c2018-08-31 09:22:23 +01001318
Mike Kelly377fb212023-01-10 15:55:28 +00001319 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
jimfly01c25411c2018-11-14 17:47:22 +00001320 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001321
1322 // register the input connection slots for the layer, connections are made after all layers have been created
1323 // only the tensors for the inputs are relevant, exclude the const tensors
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001324 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
telsoa01c577f2c2018-08-31 09:22:23 +01001325
jimfly01c25411c2018-11-14 17:47:22 +00001326 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +01001327 // register the output connection slots for the layer, connections are made after all layers have been created
1328 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001329 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, { outputTensorIndexes[0] });
telsoa01c577f2c2018-08-31 09:22:23 +01001330}
1331
Matthew Sloyan4d217c02021-10-07 11:48:58 +01001332// Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
Cathal Corbett80b4ef02022-05-25 11:21:11 +01001333#if defined(ARMNN_POST_TFLITE_2_4)
/// Parses a TfLite CONV_3D operator and adds a Convolution3d layer
/// (plus an optional fused activation layer) to the network.
/// Operator inputs: 0 = data tensor (NDHWC), 1 = weights, 2 = optional bias.
/// @param subgraphIndex index of the subgraph containing the operator
/// @param operatorIndex index of the operator within the subgraph
void TfLiteParserImpl::ParseConv3D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsConv3DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution3dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_DataLayout = armnn::DataLayout::NDHWC;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_StrideZ = CHECKED_NON_NEGATIVE(options->stride_d);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
    desc.m_DilationZ = CHECKED_NON_NEGATIVE(options->dilation_d_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Assuming input is NDHWC
    unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, InputChannels, OutputChannels
    // (the TensorFlow conv3d filter layout - TODO confirm against the TfLite schema;
    // only dims 0..2 are read here, so the I/O channel order does not affect this code)
    unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Derive explicit padding from the TfLite padding scheme, one spatial dimension at a time.
    CalcPadding(inputDepth, filterDepth, desc.m_StrideZ,
                desc.m_DilationZ, desc.m_PadFront, desc.m_PadBack, options->padding);
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // NOTE(review): the result of this call is not used below (weights are added
    // by SetupConstantLayers); presumably kept for its validation/conversion side
    // effects - confirm before removing.
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());

    auto layerName = fmt::format("Conv3D:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input and weights tensor to the registration list.
    // The constant weights will be added by SetupConstantLayers.
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;

        // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
    }

    armnn::IConnectableLayer* layer = m_Network->AddConvolution3dLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output shape is inferred from operator inputs 0 (data) and 1 (weights).
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register the input connection slots for the layer, connections are made after all layers have been created
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
Matthew Sloyan4d217c02021-10-07 11:48:58 +01001415#endif
Matthew Sloyaneb5f8102021-10-05 17:31:42 +01001416
Kevin May7d96b162021-02-03 17:38:41 +00001417void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001418{
1419 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1420
Mike Kelly0d77ae12022-01-07 17:42:27 +00001421 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1422 const auto* options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01001423
1424 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1425
1426 DepthwiseConvolution2dDescriptor desc;
telsoa01c577f2c2018-08-31 09:22:23 +01001427 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1428 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +00001429 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matthew Jacksond6a9dee2019-07-22 13:53:24 +01001430 CHECKED_NON_NEGATIVE(options->depth_multiplier);
telsoa01c577f2c2018-08-31 09:22:23 +01001431
1432 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1433 CHECK_VALID_SIZE(inputs.size(), 2, 3);
Cathal Corbett06902652022-04-14 17:55:11 +01001434 if (inputs.size() == 3)
1435 {
1436 desc.m_BiasEnabled = true;
1437 }
1438
telsoa01c577f2c2018-08-31 09:22:23 +01001439 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1440 CHECK_VALID_SIZE(outputs.size(), 1);
Pablo Tellof0bd6832019-04-26 17:58:13 +01001441 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1442 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +00001443
Mike Kelly377fb212023-01-10 15:55:28 +00001444 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1445 armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001446
Matteo Martincigh747ef822018-12-18 09:26:39 +00001447 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +01001448 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1449 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +00001450
1451 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +01001452 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1453 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1454
Pablo Tellof0bd6832019-04-26 17:58:13 +01001455 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1456 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1457 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1458 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +01001459
Jan Eilers53ef7952021-06-02 12:01:25 +01001460 // ArmNN uses the same filter tensor layout at TfLite [1, H, W, O] no need for any permutation
James Ward58dec6b2020-09-11 17:32:44 +01001461 auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001462
Cathal Corbett06902652022-04-14 17:55:11 +01001463 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1464 // Add the first input and weights tensor to the registration list.
1465 // The constant weights will be added by SetupConstantLayers.
1466 std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};
1467
1468 armnn::IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, layerName.c_str());
1469
1470 if (desc.m_BiasEnabled)
telsoa01c577f2c2018-08-31 09:22:23 +01001471 {
1472 desc.m_BiasEnabled = true;
Mike Kelly377fb212023-01-10 15:55:28 +00001473 TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Cathal Corbett06902652022-04-14 17:55:11 +01001474
1475 // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
1476 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
telsoa01c577f2c2018-08-31 09:22:23 +01001477 }
Ryan OSheac229b3f2023-06-27 22:34:54 +01001478
1479 if (!layer)
1480 {
1481 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1482 operatorIndex, CHECK_LOCATION().AsString()));
1483 }
telsoa01c577f2c2018-08-31 09:22:23 +01001484
Mike Kelly377fb212023-01-10 15:55:28 +00001485 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
jimfly01c25411c2018-11-14 17:47:22 +00001486 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001487
1488 // register the input connection slots for the layer, connections are made after all layers have been created
1489 // only the tensors for the inputs are relevant, exclude the const tensors
Cathal Corbett06902652022-04-14 17:55:11 +01001490 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
telsoa01c577f2c2018-08-31 09:22:23 +01001491
jimfly01c25411c2018-11-14 17:47:22 +00001492 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +01001493 // register the output connection slots for the layer, connections are made after all layers have been created
1494 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1495 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1496}
1497
Kevin May7d96b162021-02-03 17:38:41 +00001498void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsed66d142019-12-06 09:55:55 +00001499{
1500 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1501
1502 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1503 CHECK_VALID_SIZE(inputs.size(), 1);
1504
1505 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1506 CHECK_VALID_SIZE(outputs.size(), 1);
1507
James Ward58dec6b2020-09-11 17:32:44 +01001508 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +00001509
1510 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001511
1512 if (!layer)
1513 {
1514 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1515 operatorIndex, CHECK_LOCATION().AsString()));
1516 }
Finn Williamsed66d142019-12-06 09:55:55 +00001517
Mike Kelly377fb212023-01-10 15:55:28 +00001518 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Finn Williamsed66d142019-12-06 09:55:55 +00001519 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1520
1521 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1522 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1523
1524 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1525 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1526}
1527
Teresa Charlin3ab85482021-06-08 16:59:29 +01001528void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorIndex)
1529{
1530 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1531
1532 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1533 CHECK_VALID_SIZE(inputs.size(), 2);
1534
1535 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1536 CHECK_VALID_SIZE(outputs.size(), 1);
1537
1538 auto layerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
1539
Mike Kelly377fb212023-01-10 15:55:28 +00001540 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001541 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001542 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1543
Teresa Charlina7a605a2023-06-14 14:51:17 +01001544 armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
1545
1546 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1547 if (axisBufferPtr == nullptr)
1548 {
1549 throw ParseException(fmt::format("{}: Operation has invalid inputs. Failed to read axis.",
1550 CHECK_LOCATION().AsString()));
1551 }
1552
1553 std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
1554 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
1555 int32_t axis = axisData[0];
1556
1557 auto inputRank = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
1558 auto outputRank = inputRank + 1;
1559 if((axis < -1 * outputRank) || (outputRank <= axis))
1560 {
1561 throw ParseException(fmt::format("{}: Axis {} is not within [-{}, {}) range.",
1562 CHECK_LOCATION().AsString(), axis, outputRank, outputRank));
1563 }
1564
1565 axis = axis < 0 ? (axis + outputRank) : axis;
1566
1567 std::vector<unsigned int> shape(static_cast<unsigned int>(outputRank));
1568 unsigned int inputShapeIndex = 0;
1569 for (unsigned int i = 0; i < static_cast<unsigned int>(outputRank); ++i)
1570 {
1571 if (i == static_cast<unsigned int>(axis))
1572 {
1573 shape[i] = 1;
1574 }
1575 else
1576 {
1577 shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
1578 ++inputShapeIndex;
1579 }
1580 }
1581
Teresa Charlin3ab85482021-06-08 16:59:29 +01001582 ReshapeDescriptor reshapeDesc;
Teresa Charlina7a605a2023-06-14 14:51:17 +01001583 reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(outputRank), shape.data());
1584 outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001585
1586 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001587
1588 if (!layer)
1589 {
1590 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1591 operatorIndex, CHECK_LOCATION().AsString()));
1592 } layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001593
Teresa Charlina7a605a2023-06-14 14:51:17 +01001594 auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
1595 m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;
1596
Teresa Charlin3ab85482021-06-08 16:59:29 +01001597 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1598 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1599
1600 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1601 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1602}
1603
Kevin May7d96b162021-02-03 17:38:41 +00001604void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
Keith Davis4cd29a02019-09-09 14:49:20 +01001605{
1606 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1607
1608 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001609 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001610
1611 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1612 CHECK_VALID_SIZE(outputs.size(), 1);
1613
James Ward58dec6b2020-09-11 17:32:44 +01001614 auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly08759e22020-03-02 11:41:31 +00001615 TransposeDescriptor desc;
Keith Davis4cd29a02019-09-09 14:49:20 +01001616
josh minorba424d22019-11-13 10:55:17 -06001617 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001618 {
Mike Kelly377fb212023-01-10 15:55:28 +00001619 armnn::TensorInfo permuteTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Kevin May85d92602019-09-27 17:21:06 +01001620 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001621 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1622 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001623 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
Mike Kelly08759e22020-03-02 11:41:31 +00001624 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
Kevin May85d92602019-09-27 17:21:06 +01001625
Mike Kelly08759e22020-03-02 11:41:31 +00001626 desc = TransposeDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001627 }
Mike Kelly377fb212023-01-10 15:55:28 +00001628 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Keith Davis4cd29a02019-09-09 14:49:20 +01001629
James Conroy05102392020-06-24 15:39:55 +01001630 IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001631
1632 if (!layer)
1633 {
1634 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1635 operatorIndex, CHECK_LOCATION().AsString()));
1636 }
Mike Kelly377fb212023-01-10 15:55:28 +00001637
1638 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1639 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Keith Davis4cd29a02019-09-09 14:49:20 +01001640 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1641
1642 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1643 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1644
1645 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1646 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1647}
1648
/// Parses a TfLite TRANSPOSE_CONV operator and adds a TransposeConvolution2d
/// layer to the network.
/// Operator inputs: 0 = output-shape tensor, 1 = weights, 2 = data tensor (NHWC),
/// 3 = optional bias.
/// @param subgraphIndex index of the subgraph containing the operator
/// @param operatorIndex index of the operator within the subgraph
void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    // A fourth input means a bias tensor is present.
    if (inputs.size() == 4)
    {
        desc.m_BiasEnabled = true;
    }
    else
    {
        CHECK_VALID_SIZE(inputs.size(), 3);
    }

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Input 2 is the data tensor and input 1 the weights (input 0 is the output shape).
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // This block determines the output shape of the transpose convolution. If the output shape tensor ptr is not null
    // And the tensor is a constant, we can access the data at load time and set the output shape of the
    // layer. If this is not constant, We do not have access to the shape data, so we have to use
    // infer output shape and skip this code block.
    if (inputs[0] && IsConstTensor(inputs[0]))
    {
        armnn::TensorInfo tensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
        std::vector<int> output_shape(tensorInfo.GetNumElements());

        if (tensorInfo.GetDataType() == DataType::Signed32)
        {
            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
        }
        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
        {
            // Each raw byte is one dimension value.
            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
            {
                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
            }
        }
        // NOTE(review): for any other data type output_shape keeps its zero
        // initialisation and is still used below - confirm that upstream
        // validation guarantees Signed32 or QAsymmU8 here.
        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : output_shape)
        {
            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        desc.m_OutputShapeEnabled = true;

        // TfLite uses NHWC tensors
        const unsigned int outputHeight = desc.m_OutputShape[1];
        const unsigned int outputWidth = desc.m_OutputShape[2];

        // With a known output extent, padding can be computed against it exactly.
        CalcPadding(inputHeight,
                    filterHeight,
                    desc.m_StrideY,
                    1, // DilationY
                    desc.m_PadTop,
                    desc.m_PadBottom,
                    options->padding,
                    outputHeight);

        CalcPadding(inputWidth,
                    filterWidth,
                    desc.m_StrideX,
                    1, // DilationX
                    desc.m_PadLeft,
                    desc.m_PadRight,
                    options->padding,
                    outputWidth);
    }
    else
    {
        // Output extent unknown at parse time: use the overload that derives
        // padding from the input size alone.
        CalcPadding(inputHeight,
                    filterHeight,
                    desc.m_StrideY,
                    1, // DilationY
                    desc.m_PadTop,
                    desc.m_PadBottom,
                    options->padding);

        CalcPadding(inputWidth,
                    filterWidth,
                    desc.m_StrideX,
                    1, // DilationX
                    desc.m_PadLeft,
                    desc.m_PadRight,
                    options->padding);
    }

    // Weights (and bias, if present) are passed to the layer as constant tensors.
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);

    if (desc.m_BiasEnabled)
    {
        auto biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
        auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo, inputTensorInfo.GetDataType());
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasConstTensor.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output shape is inferred from operator inputs 2 (data) and 1 (weights).
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0 , { 2, 1 });
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1792
/// Parses a TfLite AVERAGE_POOL_2D operator by delegating to the shared pooling
/// parser with the Average algorithm selected.
void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1797
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001798void TfLiteParserImpl::ParseBatchMatMul(size_t subgraphIndex, size_t operatorIndex)
1799{
1800 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1801
1802 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1803 CHECK_VALID_SIZE(inputs.size(), 2);
1804
1805 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1806 CHECK_VALID_SIZE(outputs.size(), 1);
1807
1808 auto layerName = fmt::format("BatchMatMul:{}:{}", subgraphIndex, operatorIndex);
1809
Mike Kelly377fb212023-01-10 15:55:28 +00001810 TensorInfo inputXTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1811 TensorInfo inputYTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001812
1813 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1814 const auto* options = operatorPtr->builtin_options.AsBatchMatMulOptions();
1815
Teresa Charlinbc37a6b2022-09-22 10:12:58 +01001816 // Adjoint in tensorflow lite performs transpose operation
1817 BatchMatMulDescriptor descriptor(options->adj_x,
1818 options->adj_y,
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001819 false,
Teresa Charlinbc37a6b2022-09-22 10:12:58 +01001820 false);
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001821 // Arbitrary DataLayout
1822
1823 IConnectableLayer* layer = m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001824
1825 if (!layer)
1826 {
1827 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1828 operatorIndex, CHECK_LOCATION().AsString()));
1829 }
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001830
Mike Kelly377fb212023-01-10 15:55:28 +00001831 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001832 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1833
1834 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1835 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1836
1837 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1838 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1839}
1840
Kevin May7d96b162021-02-03 17:38:41 +00001841void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001842{
1843 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1844
1845 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1846 CHECK_VALID_SIZE(inputs.size(), 3);
1847
1848 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1849 CHECK_VALID_SIZE(outputs.size(), 1);
1850
Mike Kelly377fb212023-01-10 15:55:28 +00001851 armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001852 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1853
Mike Kelly377fb212023-01-10 15:55:28 +00001854 armnn::TensorInfo cropsTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001855 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1856
1857 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1858 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1859
1860 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1861 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1862
1863 size_t step = 2;
1864 std::vector<std::pair<unsigned int, unsigned int>> crops;
1865 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1866 {
1867 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1868 }
1869
1870 armnn::BatchToSpaceNdDescriptor desc;
1871 desc.m_BlockShape = blockShape;
1872 desc.m_Crops = crops;
1873 desc.m_DataLayout = armnn::DataLayout::NHWC;
1874
James Ward58dec6b2020-09-11 17:32:44 +01001875 auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001876
Mike Kelly377fb212023-01-10 15:55:28 +00001877 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
James Conroy05102392020-06-24 15:39:55 +01001878
1879 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001880
1881 if (!layer)
1882 {
1883 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1884 operatorIndex, CHECK_LOCATION().AsString()));
1885 }
Mike Kelly377fb212023-01-10 15:55:28 +00001886
1887 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1888 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001889 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1890
1891 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1892 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1893
1894 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1895 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1896}
1897
Idriss Chaouch564c13d2023-09-01 17:58:38 +01001898void TfLiteParserImpl::ParseBroadcastTo(size_t subgraphIndex, size_t operatorIndex)
1899{
1900 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1901
1902 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1903 CHECK_VALID_SIZE(inputs.size(), 2);
1904
1905 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1906 CHECK_VALID_SIZE(outputs.size(), 1);
1907
1908 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1909 TensorInfo shapeTensorInfo = ToTensorInfo(inputs[1]);
1910 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
1911
1912 auto layerName = fmt::format("Broadcast_to:{}:{}", subgraphIndex, operatorIndex);
1913
1914 BroadcastToDescriptor descriptor;
1915
1916 auto shapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1917 if (shapeBufferPtr != nullptr)
1918 {
1919 std::vector<unsigned int> targetShape;
1920 unsigned int numElement = shapeTensorInfo.GetNumElements();
1921 auto shapeData = reinterpret_cast<const int32_t*>(shapeBufferPtr->data.data());
1922 if (shapeData)
1923 {
1924 for (unsigned int i = 0; i < numElement; ++i)
1925 {
1926 targetShape.push_back(armnn::numeric_cast<unsigned int>(shapeData[i]));
1927 }
1928 descriptor.m_BroadcastToShape = TensorShape(numElement, targetShape.data());
1929 }
1930 /// get dataShape from outputShape if missing
1931 else
1932 {
1933 if(outputTensorInfo.GetShape().GetNumElements() <= 1)
1934 {
1935 ARMNN_THROW_PARSE_EXCEPTION("For Broadcast_to layer, "
1936 "data and output shape are not found in the buffer.");
1937 }
1938 descriptor.m_BroadcastToShape = outputTensorInfo.GetShape();
1939 }
1940 }
1941 else
1942 {
1943 ARMNN_THROW_PARSE_EXCEPTION("For Broadcast_to layer, Shape data was not found in the buffer.");
1944 }
1945
1946 IConnectableLayer* layer = m_Network->AddBroadcastToLayer(descriptor, layerName.c_str());
1947 ARMNN_ASSERT(layer != nullptr);
1948
1949 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1950
1951 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1952 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1953
1954 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1955 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1956}
1957
Kevin May7d96b162021-02-03 17:38:41 +00001958void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001959{
1960 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1961
1962 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1963 CHECK_VALID_SIZE(inputs.size(), 1);
1964
1965 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1966 CHECK_VALID_SIZE(outputs.size(), 1);
1967
1968 L2NormalizationDescriptor desc;
1969 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001970 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001971 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1972
Ryan OSheac229b3f2023-06-27 22:34:54 +01001973 if (!layer)
1974 {
1975 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1976 operatorIndex, CHECK_LOCATION().AsString()));
1977 }
Matthew Jackson28c94572019-07-18 10:47:03 +01001978
Mike Kelly377fb212023-01-10 15:55:28 +00001979 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Jackson28c94572019-07-18 10:47:03 +01001980 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1981
1982 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1983 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1984
1985 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1986 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1987}
1988
// Parses a TfLite MAX_POOL_2D operator by delegating to the shared
// Pool2D handler with the Max pooling algorithm.
void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1993
Kevin May7d96b162021-02-03 17:38:41 +00001994void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001995{
1996 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1997
1998 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1999 CHECK_VALID_SIZE(inputs.size(), 2);
2000
2001 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2002 CHECK_VALID_SIZE(outputs.size(), 1);
2003
James Ward58dec6b2020-09-11 17:32:44 +01002004 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01002005
Mike Kelly377fb212023-01-10 15:55:28 +00002006 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2007 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
James Conroy05102392020-06-24 15:39:55 +01002008 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02002009
Mike Kelly3ec30772023-03-08 13:47:17 +00002010 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Maximum, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002011
2012 if (!layer)
2013 {
2014 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2015 operatorIndex, CHECK_LOCATION().AsString()));
2016 }
Mike Kelly377fb212023-01-10 15:55:28 +00002017
2018 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
2019 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02002020 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2021
2022 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002023 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02002024
2025 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2026 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2027}
2028
Kevin May7d96b162021-02-03 17:38:41 +00002029void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02002030{
2031 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2032
2033 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2034 CHECK_VALID_SIZE(inputs.size(), 2);
2035
2036 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2037 CHECK_VALID_SIZE(outputs.size(), 1);
2038
James Ward58dec6b2020-09-11 17:32:44 +01002039 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01002040
Mike Kelly377fb212023-01-10 15:55:28 +00002041 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2042 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
James Conroy05102392020-06-24 15:39:55 +01002043 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02002044
Mike Kelly3ec30772023-03-08 13:47:17 +00002045 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Minimum, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002046
2047 if (!layer)
2048 {
2049 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2050 operatorIndex, CHECK_LOCATION().AsString()));
2051 }
Mike Kelly377fb212023-01-10 15:55:28 +00002052
2053 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
2054 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02002055 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2056
2057 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002058 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02002059
2060 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2061 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2062}
2063
Kevin May7d96b162021-02-03 17:38:41 +00002064void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
2065 size_t operatorIndex,
2066 PoolingAlgorithm algorithm)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002067{
2068 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2069
Mike Kelly0d77ae12022-01-07 17:42:27 +00002070 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2071 const auto* options = operatorPtr->builtin_options.AsPool2DOptions();
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002072
2073 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2074
2075 std::string layerName;
2076
2077 switch (algorithm)
2078 {
2079 case PoolingAlgorithm::Average:
2080 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01002081 fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002082 break;
2083 case PoolingAlgorithm::Max:
2084 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01002085 fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002086 break;
2087 default:
Ryan OSheac229b3f2023-06-27 22:34:54 +01002088 throw ParseException(fmt::format("Unsupported Pooling Algorithm {}", CHECK_LOCATION().AsString()));
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002089 }
2090
2091 Pooling2dDescriptor desc;
2092
2093 desc.m_PoolType = algorithm;
2094 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
2095 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
2096 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
2097 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
2098 desc.m_PaddingMethod = PaddingMethod::Exclude;
2099 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00002100 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002101
2102 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2103 CHECK_VALID_SIZE(inputs.size(), 1);
Mike Kelly377fb212023-01-10 15:55:28 +00002104 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002105
2106 // assuming input is NHWC
2107 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
2108 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
2109
Pablo Tellof0bd6832019-04-26 17:58:13 +01002110 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
2111 desc.m_PadTop, desc.m_PadBottom, options->padding);
2112 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
2113 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002114
2115 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2116 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002117
James Conroy05102392020-06-24 15:39:55 +01002118 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002119
2120 if (!layer)
2121 {
2122 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2123 operatorIndex, CHECK_LOCATION().AsString()));
2124 }
Mike Kelly377fb212023-01-10 15:55:28 +00002125
2126 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2127 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
jimfly01c25411c2018-11-14 17:47:22 +00002128 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002129
2130 // register the input connection slots for the layer, connections are made after all layers have been created
2131 // only the tensors for the inputs are relevant, exclude the const tensors
2132 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00002133 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002134
jimfly01c25411c2018-11-14 17:47:22 +00002135 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01002136 // register the output connection slots for the layer, connections are made after all layers have been created
2137 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2138 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2139}
2140
Kevin May7d96b162021-02-03 17:38:41 +00002141void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
josh minorba424d22019-11-13 10:55:17 -06002142{
2143 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2144
2145 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2146 CHECK_VALID_SIZE(inputs.size(), 3);
2147 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2148 CHECK_VALID_SIZE(outputs.size(), 1);
2149
2150 SliceDescriptor desc;
2151
2152 // set begin tensor info for slice descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00002153 armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
josh minorba424d22019-11-13 10:55:17 -06002154 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2155
2156 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
2157 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
2158
2159 // set size tensor info for slice descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00002160 armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
josh minorba424d22019-11-13 10:55:17 -06002161 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
2162
Cathal Corbettde33dda2022-09-20 16:40:09 +01002163 std::vector<int> signedSize(sizeTensorInfo.GetNumElements(), 1);
2164
2165 // if size buffer data is not specified, all contents of size vector remain as values of 1
2166 if (sizeBufferPtr->data.data())
2167 {
2168 ::memcpy(signedSize.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2169 }
2170
josh minorba424d22019-11-13 10:55:17 -06002171 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
Mike Kelly377fb212023-01-10 15:55:28 +00002172 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly7ba84d62021-09-10 15:27:19 +01002173
2174 for (unsigned int i = 0; i < signedSize.size(); ++i)
2175 {
2176 int signedValue = signedSize[i];
Jim Flynnfca233e2021-09-23 12:16:53 +01002177
Mike Kelly7ba84d62021-09-10 15:27:19 +01002178 if (signedValue < -1 || signedValue > static_cast<int>(inputTensorInfo.GetShape()[i] - begin[i]))
2179 {
2180 throw ParseException(fmt::format("Invalid value for size {} size must be in range "
2181 "[-1, inputDimSize - begin] [-1, {}] inclusive {}",
2182 signedValue,
2183 inputTensorInfo.GetShape()[i] - begin[i],
2184 CHECK_LOCATION().AsString()));
2185 }
2186
2187 if (signedValue == -1)
2188 {
2189 size[i] = inputTensorInfo.GetShape()[i] - begin[i];
2190 }
2191 else
2192 {
2193 size[i] = static_cast<unsigned int>(signedValue);
2194 }
2195 }
2196
josh minorba424d22019-11-13 10:55:17 -06002197 desc = SliceDescriptor(begin, size);
2198
James Ward58dec6b2020-09-11 17:32:44 +01002199 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06002200
James Conroy05102392020-06-24 15:39:55 +01002201 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
Mike Kelly377fb212023-01-10 15:55:28 +00002202
2203 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2204 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
josh minorba424d22019-11-13 10:55:17 -06002205 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2206
2207 // register the input connection slots for the layer, connections are made after all layers have been created
2208 // only the tensors for the inputs are relevant, exclude the const tensors
2209 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2210 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2211
2212 // register the output connection slots for the layer, connections are made after all layers have been created
2213 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2214 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2215}
2216
Kevin May7d96b162021-02-03 17:38:41 +00002217void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01002218{
2219 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00002220 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2221 const auto* options = operatorPtr->builtin_options.AsSoftmaxOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01002222
2223 SoftmaxDescriptor desc;
2224 desc.m_Beta = options->beta;
2225
2226 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2227 CHECK_VALID_SIZE(inputs.size(), 1);
2228 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2229 CHECK_VALID_SIZE(outputs.size(), 1);
2230
James Ward58dec6b2020-09-11 17:32:44 +01002231 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01002232 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
2233
Mike Kelly377fb212023-01-10 15:55:28 +00002234 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
telsoa01c577f2c2018-08-31 09:22:23 +01002235 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2236
2237 // register the input connection slots for the layer, connections are made after all layers have been created
2238 // only the tensors for the inputs are relevant, exclude the const tensors
2239 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2240 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2241
2242 // register the output connection slots for the layer, connections are made after all layers have been created
2243 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2244 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2245}
2246
Teresa Charlinfd33a692022-06-29 15:35:57 +01002247void TfLiteParserImpl::ParseLogSoftmax(size_t subgraphIndex, size_t operatorIndex)
2248{
2249 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2250
2251 LogSoftmaxDescriptor desc;
2252
2253 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2254 CHECK_VALID_SIZE(inputs.size(), 1);
2255 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2256 CHECK_VALID_SIZE(outputs.size(), 1);
2257
2258 auto layerName = fmt::format("LogSoftmax:{}:{}", subgraphIndex, operatorIndex);
2259 IConnectableLayer* const layer = m_Network->AddLogSoftmaxLayer(desc, layerName.c_str());
2260
Mike Kelly377fb212023-01-10 15:55:28 +00002261 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Teresa Charlinfd33a692022-06-29 15:35:57 +01002262 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2263
2264 // register the input connection slots for the layer, connections are made after all layers have been created
2265 // only the tensors for the inputs are relevant, exclude the const tensors
2266 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2267 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2268
2269 // register the output connection slots for the layer, connections are made after all layers have been created
2270 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2271 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2272}
2273
Kevin May7d96b162021-02-03 17:38:41 +00002274void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbaded142019-02-08 19:02:48 -02002275{
2276 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2277
2278 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2279 CHECK_VALID_SIZE(inputs.size(), 3);
2280
2281 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2282 CHECK_VALID_SIZE(outputs.size(), 1);
2283
Mike Kelly377fb212023-01-10 15:55:28 +00002284 armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02002285 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2286
Mike Kelly377fb212023-01-10 15:55:28 +00002287 armnn::TensorInfo padListTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02002288 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
2289
2290 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
2291 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
2292
2293 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
2294 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
2295
2296 size_t step = 2;
2297 std::vector<std::pair<unsigned int, unsigned int>> padList;
2298 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
2299 {
2300 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
2301 }
2302
2303 armnn::SpaceToBatchNdDescriptor desc;
2304 desc.m_BlockShape = blockShape;
2305 desc.m_PadList = padList;
2306 desc.m_DataLayout = armnn::DataLayout::NHWC;
2307
James Ward58dec6b2020-09-11 17:32:44 +01002308 auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02002309
Mike Kelly377fb212023-01-10 15:55:28 +00002310 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
James Conroy05102392020-06-24 15:39:55 +01002311
2312 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002313
2314 if (!layer)
2315 {
2316 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2317 operatorIndex, CHECK_LOCATION().AsString()));
2318 }
Mike Kelly377fb212023-01-10 15:55:28 +00002319
2320 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2321 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalvesbaded142019-02-08 19:02:48 -02002322 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2323
2324 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2325 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2326
2327 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2328 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2329}
2330
Teresa Charlin2a764ad2023-02-24 18:17:31 +00002331void TfLiteParserImpl::ParseSpaceToDepth(size_t subgraphIndex, size_t operatorIndex)
2332{
2333 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2334
2335 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2336 CHECK_VALID_SIZE(inputs.size(), 1);
2337 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2338 CHECK_VALID_SIZE(outputs.size(), 1);
2339
2340 armnn::SpaceToDepthDescriptor descriptor;
2341
2342 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2343 const auto* options = operatorPtr->builtin_options.AsSpaceToDepthOptions();
2344 auto blockSize = options->block_size;
2345 if (blockSize < 2)
2346 {
2347 throw ParseException(
2348 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
2349 blockSize,
2350 CHECK_LOCATION().AsString()));
2351 }
2352 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
2353
2354 auto layerName = fmt::format("SpaceToDepth:{}:{}", subgraphIndex, operatorIndex);
2355 IConnectableLayer* layer = m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002356
2357 if (!layer)
2358 {
2359 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2360 operatorIndex, CHECK_LOCATION().AsString()));
2361 }
2362
Teresa Charlin2a764ad2023-02-24 18:17:31 +00002363 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2364 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2365
2366 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2367 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2368
2369 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2370 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2371}
2372
Teresa Charlin3ab85482021-06-08 16:59:29 +01002373armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
Mike Kelly0d77ae12022-01-07 17:42:27 +00002374 const armnn::TensorInfo& inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01002375{
Teresa Charlin3ab85482021-06-08 16:59:29 +01002376 CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
telsoa01c577f2c2018-08-31 09:22:23 +01002377 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2378
2379 if (inputTensorInfo.GetNumDimensions() > 4)
2380 {
2381 std::stringstream ss;
2382 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2383 << " shape:" << inputTensorInfo.GetShape() << " "
2384 << CHECK_LOCATION().AsString();
2385 throw ParseException(ss.str());
2386 }
2387
2388 if (squeezeDims.empty())
2389 {
2390 squeezeDims.assign(dimensionSequence,
2391 dimensionSequence+inputTensorInfo.GetNumDimensions());
2392 }
2393
2394 std::vector<uint32_t> outputDims;
2395 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2396 {
2397 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2398 auto currentDimension = inputTensorInfo.GetShape()[i];
2399 if (skipSqueeze || currentDimension != 1)
2400 {
2401 outputDims.push_back(currentDimension);
2402 }
2403 }
2404
2405 if (outputDims.size() > 4)
2406 {
2407 std::stringstream ss;
2408 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2409 << " shape:" << inputTensorInfo.GetShape() << " "
2410 << CHECK_LOCATION().AsString();
2411 throw ParseException(ss.str());
2412 }
2413
2414 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2415 outputDims.data());
2416
2417 // we need to preserve the tensor type and the quantization data as well
2418 TensorInfo outTensorInfo = inputTensorInfo;
2419 outTensorInfo.SetShape(outShape);
2420
2421 return outTensorInfo;
2422}
2423
Keith Davis0176fd82021-06-01 17:36:32 +01002424void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
2425{
2426 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2427
2428 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2429 CHECK_VALID_SIZE(inputs.size(), 1);
2430 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2431 CHECK_VALID_SIZE(outputs.size(), 1);
2432
2433 auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
2434
2435 IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002436
2437 if (!layer)
2438 {
2439 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2440 operatorIndex, CHECK_LOCATION().AsString()));
2441 }
Keith Davis0176fd82021-06-01 17:36:32 +01002442
Mike Kelly377fb212023-01-10 15:55:28 +00002443 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Keith Davis0176fd82021-06-01 17:36:32 +01002444 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2445
2446 // Check if output tensor type is Signed32 or Signed64
2447 if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
2448 outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
2449 {
2450 throw ParseException(
2451 fmt::format(
2452 "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
2453 CHECK_LOCATION().AsString()));
2454 }
2455
2456 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2457 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2458
2459 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2460 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2461}
2462
/// Converts a TFLite SQUEEZE operator into an ArmNN Reshape layer.
/// The squeeze dimensions come from the operator's SqueezeOptions; the output
/// shape is computed by OutputShapeOfSqueeze and realised as a plain reshape.
void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
    auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    std::vector<uint32_t> squeezeDim;
    // A single negative dim index is interpreted as a negative index in python
    // Meaning the index will be the shape size plus the negative index value
    if (options->squeeze_dims.size() == 1 && options->squeeze_dims[0] < 0)
    {
        int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
        squeezeDim.push_back(static_cast<uint32_t>(dim));
    }
    else
    {
        squeezeDim = AsUnsignedVector(options->squeeze_dims);
    }

    // Derive the squeezed output shape (empty squeezeDim means "squeeze every size-1 dim").
    armnn::TensorInfo outputTensorInfo = TfLiteParserImpl::OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);

    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    // Record the computed output info before the layer is created so later
    // operators that consume this tensor see the squeezed shape.
    auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
    m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2518
Kevin May7d96b162021-02-03 17:38:41 +00002519void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002520{
2521 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2522
2523 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2524 CHECK_VALID_SIZE(inputs.size(), 4);
2525
2526 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2527 CHECK_VALID_SIZE(outputs.size(), 1);
2528
Mike Kelly0d77ae12022-01-07 17:42:27 +00002529 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2530 const auto* options = operatorPtr->builtin_options.AsStridedSliceOptions();
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002531
2532 StridedSliceDescriptor desc;
2533 desc.m_BeginMask = options->begin_mask;
2534 desc.m_EllipsisMask = options->ellipsis_mask;
2535 desc.m_EndMask = options->end_mask;
2536 desc.m_NewAxisMask = options->new_axis_mask;
2537 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
2538 desc.m_DataLayout = armnn::DataLayout::NHWC;
2539
Mike Kelly377fb212023-01-10 15:55:28 +00002540 armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002541 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2542
2543 std::vector<int> begin(beginTensorInfo.GetNumElements());
David Monahan39085f72023-07-28 11:37:29 +01002544 if (beginBufferPtr->data.data() != nullptr)
2545 {
2546 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
2547 }
2548 else
2549 {
2550 throw ParseException("ParseStridedSlice: Invalid input - the begin vector is null");
2551 }
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002552
Mike Kelly377fb212023-01-10 15:55:28 +00002553 armnn::TensorInfo endTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002554 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
2555
2556 std::vector<int> end(endTensorInfo.GetNumElements());
David Monahan39085f72023-07-28 11:37:29 +01002557 if (endBufferPtr->data.data() != nullptr)
2558 {
2559 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
2560 }
2561 else
2562 {
2563 throw ParseException("ParseStridedSlice: Invalid input - the end vector is null");
2564 }
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002565
Mike Kelly377fb212023-01-10 15:55:28 +00002566 armnn::TensorInfo strideTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002567 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
2568
2569 std::vector<int> stride(strideTensorInfo.GetNumElements());
David Monahan39085f72023-07-28 11:37:29 +01002570
2571 if (strideBufferPtr->data.data() != nullptr)
2572 {
2573 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
2574 }
2575 else
2576 {
2577 throw ParseException("ParseStridedSlice: Invalid input - the stride vector is null");
2578 }
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002579
2580 desc.m_Begin = begin;
2581 desc.m_End = end;
2582 desc.m_Stride = stride;
2583
James Ward58dec6b2020-09-11 17:32:44 +01002584 auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002585 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002586
2587 if (!layer)
2588 {
2589 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2590 operatorIndex, CHECK_LOCATION().AsString()));
2591 }
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002592
Mike Kelly377fb212023-01-10 15:55:28 +00002593 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002594 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2595
2596 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2597 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2598
2599 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2600 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2601}
2602
Kevin May7d96b162021-02-03 17:38:41 +00002603void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002604{
2605 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2606
Mike Kelly0d77ae12022-01-07 17:42:27 +00002607 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2608 const auto* options = operatorPtr->builtin_options.AsSubOptions();
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002609
2610 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2611 CHECK_VALID_SIZE(inputs.size(), 2);
2612
2613 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2614 CHECK_VALID_SIZE(outputs.size(), 1);
2615
Mike Kelly377fb212023-01-10 15:55:28 +00002616 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2617 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002618
James Ward58dec6b2020-09-11 17:32:44 +01002619 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002620 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Sub, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002621
2622 if (!layer)
2623 {
2624 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2625 operatorIndex, CHECK_LOCATION().AsString()));
2626 }
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002627
Mike Kelly377fb212023-01-10 15:55:28 +00002628 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002629 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2630
2631 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002632 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Teresa Charlind04873f2023-05-23 14:16:28 +01002633 if (options)
2634 {
2635 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2636 }
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002637
2638 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2639 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2640}
2641
Kevin May7d96b162021-02-03 17:38:41 +00002642void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302643{
2644 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2645
Mike Kelly0d77ae12022-01-07 17:42:27 +00002646 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2647 const auto* options = operatorPtr->builtin_options.AsDivOptions();
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302648
2649 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2650 CHECK_VALID_SIZE(inputs.size(), 2);
2651
2652 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2653 CHECK_VALID_SIZE(outputs.size(), 1);
2654
Mike Kelly377fb212023-01-10 15:55:28 +00002655 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2656 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302657
James Ward58dec6b2020-09-11 17:32:44 +01002658 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002659 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002660
2661 if (!layer)
2662 {
2663 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2664 operatorIndex, CHECK_LOCATION().AsString()));
2665 }
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302666
Mike Kelly377fb212023-01-10 15:55:28 +00002667 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302668 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2669
2670 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002671 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Teresa Charlind04873f2023-05-23 14:16:28 +01002672 if (options)
2673 {
2674 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2675 }
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302676
2677 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2678 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2679}
2680
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002681void TfLiteParserImpl::ParseFloorDiv(size_t subgraphIndex, size_t operatorIndex)
2682{
2683 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2684
2685 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2686 CHECK_VALID_SIZE(inputs.size(), 2);
2687
2688 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2689 CHECK_VALID_SIZE(outputs.size(), 1);
2690
Mike Kelly377fb212023-01-10 15:55:28 +00002691 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2692 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002693
2694 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002695 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002696
2697 if (!layer)
2698 {
2699 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2700 operatorIndex, CHECK_LOCATION().AsString()));
2701 }
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002702
Mike Kelly377fb212023-01-10 15:55:28 +00002703 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002704 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2705
2706 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2707 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2708 layer = AddFusedFloorLayer(layer, 0);
2709
2710 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2711 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2712}
2713
Kevin May7d96b162021-02-03 17:38:41 +00002714void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002715{
2716 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2717
Mike Kelly0d77ae12022-01-07 17:42:27 +00002718 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2719 const auto* options = operatorPtr->builtin_options.AsAddOptions();
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002720
2721 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2722 CHECK_VALID_SIZE(inputs.size(), 2);
2723
2724 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2725 CHECK_VALID_SIZE(outputs.size(), 1);
2726
Mike Kelly377fb212023-01-10 15:55:28 +00002727 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2728 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002729
James Ward58dec6b2020-09-11 17:32:44 +01002730 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002731 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Add, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002732
2733 if (!layer)
2734 {
2735 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2736 operatorIndex, CHECK_LOCATION().AsString()));
2737 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002738
Mike Kelly377fb212023-01-10 15:55:28 +00002739 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002740 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2741
2742 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002743 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Teresa Charlind04873f2023-05-23 14:16:28 +01002744 if (options)
2745 {
2746 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2747 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002748
2749 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2750 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2751}
2752
Kevin May7d96b162021-02-03 17:38:41 +00002753void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002754{
2755 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2756
Mike Kelly0d77ae12022-01-07 17:42:27 +00002757 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2758 const auto* options = operatorPtr->builtin_options.AsMulOptions();
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002759
2760 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2761 CHECK_VALID_SIZE(inputs.size(), 2);
2762
2763 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2764 CHECK_VALID_SIZE(outputs.size(), 1);
2765
Mike Kelly377fb212023-01-10 15:55:28 +00002766 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2767 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002768
James Ward58dec6b2020-09-11 17:32:44 +01002769 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002770 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Mul, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002771
2772 if (!layer)
2773 {
2774 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2775 operatorIndex, CHECK_LOCATION().AsString()));
2776 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002777
Mike Kelly377fb212023-01-10 15:55:28 +00002778 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002779 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2780
2781 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002782 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Teresa Charlind04873f2023-05-23 14:16:28 +01002783 if (options)
2784 {
2785 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2786 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002787
2788 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2789 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2790}
2791
Kevin May7d96b162021-02-03 17:38:41 +00002792void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002793{
2794 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2795
2796 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2797
2798 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2799 CHECK_VALID_SIZE(outputs.size(), 1);
2800
Teresa Charlin046e2cb2023-03-28 17:20:19 +01002801 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2802 TensorInfo dimTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002803
2804 armnn::MeanDescriptor desc;
Teresa Charlin046e2cb2023-03-28 17:20:19 +01002805 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2806 // Get const axis value from model and set it to descriptor.
2807 if (axisBufferPtr != nullptr)
2808 {
2809 std::vector<int32_t> axisData(dimTensorInfo.GetNumElements());
2810 ::memcpy(axisData.data(), axisBufferPtr->data.data(), dimTensorInfo.GetNumBytes());
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002811
Teresa Charlin046e2cb2023-03-28 17:20:19 +01002812 // Convert the axis to unsigned int and remove duplicates.
2813 auto rank = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
2814 std::set<unsigned int> uniqueAxis;
2815 std::transform(axisData.begin(),
2816 axisData.end(),
2817 std::inserter(uniqueAxis, uniqueAxis.begin()),
2818 [rank](int i)->unsigned int{
2819 return static_cast<uint32_t>(((i + rank) % rank)); });
2820 desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
2821 }
2822 else
2823 {
2824 for (uint32_t i = 0; i < inputTensorInfo.GetNumDimensions(); ++i)
2825 {
2826 desc.m_Axis.push_back(i);
2827 }
2828 }
2829
Sadik Armagand109a4d2020-07-28 10:42:13 +01002830 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002831
Teresa Charlin046e2cb2023-03-28 17:20:19 +01002832 desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ? true : false;
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002833
James Ward58dec6b2020-09-11 17:32:44 +01002834 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002835 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002836
2837 if (!layer)
2838 {
2839 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2840 operatorIndex, CHECK_LOCATION().AsString()));
2841 }
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002842
Mike Kelly377fb212023-01-10 15:55:28 +00002843 outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002844 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2845
2846 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2847 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2848
2849 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2850 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2851}
2852
/// Converts a TFLite PAD or PADV2 operator into an ArmNN Pad layer.
/// PAD takes 2 inputs (data, paddings); PADV2 takes 3 (data, paddings, pad value).
/// For quantized inputs with no explicit pad value, the quantization offset is
/// used so the padding dequantizes to zero.
void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Paddings tensor: flat list of (before, after) pairs, one pair per dimension.
    std::vector<unsigned int> padBuffer = GetUIntBuffer(padTensorInfo, m_Model, inputs[1]->buffer);

    size_t step = 2;
    armnn::PadDescriptor desc;
    auto opcode = GetOpCode(m_Model, subgraphIndex, operatorIndex);

    if (opcode == tflite::BuiltinOperator_PAD)
    {
        CHECK_VALID_SIZE(inputs.size(), 2);

        // Quantized padding uses the zero-point so it dequantizes to 0.
        if (inputTensorInfo.IsQuantized())
        {
            desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
        }
    }
    else if (opcode == tflite::BuiltinOperator_PADV2)
    {
        CHECK_VALID_SIZE(inputs.size(), 3);

        armnn::TensorInfo padValueTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        // ArmNN's PadDescriptor holds a single scalar pad value.
        if (padValueTensorInfo.GetNumElements() != 1)
        {
            ARMNN_THROW_PARSE_EXCEPTION("Multiple padding values are not supported in PADV2");
        }
        BufferRawPtr padValueBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

        // Get the pad value from the input tensor
        if (padValueBufferPtr->data.size() > 0)
        {
            // Dequantize the constant pad value where needed so m_PadValue is
            // always expressed in float.
            switch (padValueTensorInfo.GetDataType())
            {
                case armnn::DataType::Float32:
                {
                    std::vector<float> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = padValueBuffer[0];
                    break;
                }
                case armnn::DataType::QAsymmU8:
                {
                    std::vector<uint8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = armnn::Dequantize<uint8_t>(padValueBuffer[0],
                                                                 padValueTensorInfo.GetQuantizationScale(),
                                                                 padValueTensorInfo.GetQuantizationOffset());
                    break;
                }
                case armnn::DataType::QAsymmS8:
                case armnn::DataType::QSymmS8:
                {
                    std::vector<int8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = armnn::Dequantize<int8_t>(padValueBuffer[0],
                                                                padValueTensorInfo.GetQuantizationScale(),
                                                                padValueTensorInfo.GetQuantizationOffset());
                    break;
                }
                default: ARMNN_THROW_PARSE_EXCEPTION("Unsupported DataType");
            }
        }
        else if (inputTensorInfo.IsQuantized())
        {
            // No explicit pad value provided: fall back to the zero-point.
            desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
        }
    }

    // Unpack the flat paddings buffer into (before, after) pairs.
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    auto layerName = (opcode == tflite::BuiltinOperator_PAD) ? fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex)
            : fmt::format("PadV2:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2957
/// Converts a TFLite MIRROR_PAD operator into an ArmNN Pad layer in
/// Reflect or Symmetric padding mode, validating the padding amounts
/// against the input dimensions first.
void TfLiteParserImpl::ParseMirrorPad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    // Input 1 holds the constant paddings tensor: (before, after) per dimension.
    armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    // NOTE(review): bufferPtr->data.data() is not null-checked before the memcpy,
    // unlike e.g. ParseStridedSlice - confirm paddings are always constant here.
    std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
    ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());

    size_t step = 2;
    armnn::PadDescriptor desc;
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsMirrorPadOptions();

    if (options->mode == tflite::MirrorPadMode_REFLECT)
    {
        desc.m_PaddingMode = PaddingMode::Reflect;
    }
    else if (options->mode == tflite::MirrorPadMode_SYMMETRIC)
    {
        desc.m_PaddingMode = PaddingMode::Symmetric;
    }
    else
    {
        ARMNN_THROW_PARSE_EXCEPTION("PaddingMode must be either REFLECT or SYMMETRIC");
    }

    // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
    // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
    auto inputShape = inputTensorInfo.GetShape();
    auto padList = desc.m_PadList;

    // isReflect is 1 for Reflect mode and 0 for Symmetric, tightening the
    // bound by one in the Reflect case.
    const unsigned int isReflect = static_cast<unsigned int>(desc.m_PaddingMode == PaddingMode::Reflect);
    for(unsigned int i = 0; i < padList.size(); ++i)
    {
        if(padList.at(i).first > (inputShape[i] - isReflect) ||
           padList.at(i).second > (inputShape[i] - isReflect))
        {
            ARMNN_THROW_PARSE_EXCEPTION("Padding values must be less (Reflect) or "
                                        "equal (Symmetric) to the dimension size.");
        }
    }

    auto layerName = fmt::format("MirrorPad:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3034
/// Parses a TFLite PRELU operator: out = max(0, x) + alpha * min(0, x).
/// Expects exactly two inputs (the data tensor and the per-channel alpha tensor)
/// and one output. A constant alpha input is materialised as an explicit
/// Constant layer feeding input slot 1 of the Prelu layer.
void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo alphaTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    if (IsConstTensor(inputs[1]))
    {
        // Alpha is a constant: only the data tensor (input 0) is wired as a normal
        // consumer; alpha is lifted into a Constant layer connected to slot 1.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
        RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);

        // Alpha data is converted (if necessary) to the data tensor's type so both
        // Prelu inputs agree.
        auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo,
                                                               inputTensorInfo.GetDataType());
        std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
        IConnectableLayer* constLayer =
                    m_Network->AddConstantLayer(alphaTensorAndData.first, constLayerName.c_str());

        if (!constLayer)
        {
            throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                                   operatorIndex, CHECK_LOCATION().AsString()));
        }

        constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
        constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
        // VIRTUAL_OPERATOR_ID: the constant layer does not correspond to a real
        // TFLite operator, but its output tensor id must still be registered.
        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            constLayer,
                            { inputTensorIndexes[1] });
    }
    else
    {
        // Alpha is produced by another layer: register both inputs normally.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
    }

    // Output shape is inferred from both inputs (indices {0, 1}).
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
3096
Kevin May7d96b162021-02-03 17:38:41 +00003097void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00003098{
3099 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3100
3101 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3102 CHECK_VALID_SIZE(inputs.size(), 1);
3103
3104 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3105 CHECK_VALID_SIZE(outputs.size(), 1);
3106
James Ward58dec6b2020-09-11 17:32:44 +01003107 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00003108
3109 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01003110
3111 if (!layer)
3112 {
3113 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3114 operatorIndex, CHECK_LOCATION().AsString()));
3115 }
Sadik Armagan66dedc72019-12-10 16:32:07 +00003116
Mike Kelly377fb212023-01-10 15:55:28 +00003117 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan66dedc72019-12-10 16:32:07 +00003118 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3119
3120 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3121 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3122
3123 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3124 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3125}
Finn Williamsc42c3842019-01-22 14:18:11 +00003126
Kevin May7d96b162021-02-03 17:38:41 +00003127void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01003128{
Finn Williamsc42c3842019-01-22 14:18:11 +00003129 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01003130}
3131
Kevin May7d96b162021-02-03 17:38:41 +00003132void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01003133{
Finn Williamsc42c3842019-01-22 14:18:11 +00003134 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
3135}
Sadik Armagan58f39192018-09-17 14:14:39 +01003136
Kevin May7d96b162021-02-03 17:38:41 +00003137void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan12239e72020-05-27 11:06:17 +01003138{
Jan Eilers2f746b32020-07-28 14:00:06 +01003139 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
Sadik Armagan12239e72020-05-27 11:06:17 +01003140}
3141
Kevin May7d96b162021-02-03 17:38:41 +00003142void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsc42c3842019-01-22 14:18:11 +00003143{
3144 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
3145}
3146
Kevin May7d96b162021-02-03 17:38:41 +00003147void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd99851762019-04-09 09:37:38 +01003148{
3149 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
3150}
3151
Kevin May7d96b162021-02-03 17:38:41 +00003152void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
Matthew Sloyan7515d072020-12-16 12:50:01 +00003153{
3154 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
3155}
3156
Kevin May7d96b162021-02-03 17:38:41 +00003157void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
Jan Eilers2f746b32020-07-28 14:00:06 +01003158{
3159 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
3160}
Finn Williamsc42c3842019-01-22 14:18:11 +00003161
Kevin May7d96b162021-02-03 17:38:41 +00003162void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
Finn Williamsc42c3842019-01-22 14:18:11 +00003163{
3164 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00003165 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Jan Eilers8eb25602020-03-09 12:13:48 +00003166 IgnoreUnused(operatorPtr);
Sadik Armagan58f39192018-09-17 14:14:39 +01003167
3168 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3169 CHECK_VALID_SIZE(inputs.size(), 1);
3170
3171 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3172 CHECK_VALID_SIZE(outputs.size(), 1);
3173
James Ward58dec6b2020-09-11 17:32:44 +01003174 auto layerName = fmt::format("Activation:");
Sadik Armagan58f39192018-09-17 14:14:39 +01003175 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00003176 activationDesc.m_Function = activationType;
3177
3178 switch (activationType)
3179 {
3180 case ActivationFunction::ReLu:
3181 {
James Ward58dec6b2020-09-11 17:32:44 +01003182 layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00003183 break;
3184 }
3185 case ActivationFunction::BoundedReLu:
3186 {
James Ward58dec6b2020-09-11 17:32:44 +01003187 layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00003188 activationDesc.m_A = 6.0f;
3189 activationDesc.m_B = 0.0f;
3190 break;
3191 }
3192 case ActivationFunction::Sigmoid:
3193 {
James Ward58dec6b2020-09-11 17:32:44 +01003194 layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00003195 break;
3196 }
Nina Drozd99851762019-04-09 09:37:38 +01003197 case ActivationFunction::TanH:
3198 {
James Ward58dec6b2020-09-11 17:32:44 +01003199 layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd99851762019-04-09 09:37:38 +01003200 activationDesc.m_A = 1.0f;
3201 activationDesc.m_B = 1.0f;
3202 break;
3203 }
Sadik Armagan12239e72020-05-27 11:06:17 +01003204 case ActivationFunction::LeakyReLu:
3205 {
James Ward58dec6b2020-09-11 17:32:44 +01003206 layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00003207 const auto* options = operatorPtr->builtin_options.AsLeakyReluOptions();
Sadik Armagan12239e72020-05-27 11:06:17 +01003208 activationDesc.m_A = options->alpha;
3209 break;
3210 }
Matthew Sloyan7515d072020-12-16 12:50:01 +00003211 case ActivationFunction::Elu:
3212 {
3213 layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
3214 activationDesc.m_A = 1.0f;
3215 break;
3216 }
Jan Eilers2f746b32020-07-28 14:00:06 +01003217 case ActivationFunction::HardSwish:
Matthew Sloyan7515d072020-12-16 12:50:01 +00003218 {
James Ward58dec6b2020-09-11 17:32:44 +01003219 layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
Jan Eilers2f746b32020-07-28 14:00:06 +01003220 break;
Matthew Sloyan7515d072020-12-16 12:50:01 +00003221 }
Finn Williamsc42c3842019-01-22 14:18:11 +00003222 default:
3223 {
3224 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003225 fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
3226 static_cast<int>(activationType), CHECK_LOCATION().AsString()));
Finn Williamsc42c3842019-01-22 14:18:11 +00003227 }
3228 }
3229
3230 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01003231
Mike Kelly377fb212023-01-10 15:55:28 +00003232 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan58f39192018-09-17 14:14:39 +01003233 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3234
3235 // register the input connection slots for the layer, connections are made after all layers have been created
3236 // only the tensors for the inputs are relevant, exclude the const tensors
3237 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3238 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3239
3240 // register the output connection slots for the layer, connections are made after all layers have been created
3241 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3242 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3243}
Mike Kelly0d77ae12022-01-07 17:42:27 +00003244armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo& inputTensorInfo,
3245 const std::vector<int32_t>& targetDimsIn)
Sadikb94967b2018-09-19 15:30:00 +01003246{
3247 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
3248 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
3249
3250 if (stretchDim != targetDimsIn.end())
3251 {
3252 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
3253 {
3254 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003255 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01003256 }
3257
3258 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003259 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01003260 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
3261
3262 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
3263 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
3264 }
3265
3266 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
3267
3268 TensorInfo reshapeInfo = inputTensorInfo;
3269 reshapeInfo.SetShape(outputShape);
3270
3271 return reshapeInfo;
3272}
3273
/// Parses a TFLite RESHAPE operator. The target shape may come from the
/// operator's built-in options, from a constant second input tensor, or (when
/// the shape input buffer is empty) be inferred at parse time from the declared
/// output shape. Validates the computed shape against the model's declared
/// output (including the shape_signature fallback) before adding the layer.
void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsReshapeOptions();
    auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");

    // Extracting new shape for the output
    // There are two ways it can be passed
    //  * First is to define the target shape in the operator built-in options
    //  * Second is to pass it as a second input tensor
    std::vector<int32_t> targetShape;
    bool targetShapeFound = false;
    // Check if built-in options were given
    if (options != nullptr)
    {
        // make sure the parameter is given
        if (options->new_shape.empty() == false)
        {
            targetShape = options->new_shape;
            targetShapeFound = true;
        }
    }

    // If there is no built-in option given or if the built-in new_shape parameter was empty
    if (!targetShapeFound)
    {
        // Check for a second input tensor
        if (inputs.size() > 1 && inputs[1] != nullptr)
        {
            if (inputs[1]->is_variable)
            {
                ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
            }

            if (inputs[1]->shape.size() != 1)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
            }

            if (inputs[1]->type != tflite::TensorType_INT32)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
            }

            // Extract target shape from input
            auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
            auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
            if (values)
            {
                // Constant shape input: copy the values directly.
                for (int i = 0; i < inputs[1]->shape[0]; ++i)
                {
                    targetShape.push_back(values[i]);
                }
            }
            else
            {
                // Shape input buffer is null: fall back to inferring the target
                // shape from the declared output, or from simple (-1)/(batch,-1)
                // patterns derived from the input element count.
                try
                {
                    // We attempt to infer during Runtime.
                    TensorShape reshapeShapes = ToTensorInfo(inputs[1]).GetShape();

                    if (reshapeShapes[0] == actualOutputTensorInfo.GetNumDimensions())
                    {
                        // Rank of the shape input matches the declared output rank:
                        // take the declared output shape verbatim.
                        for (unsigned int i = 0; i < actualOutputTensorInfo.GetShape().GetNumDimensions(); ++i)
                        {
                            targetShape.push_back(actualOutputTensorInfo.GetShape()[i]);
                        }
                    }
                    // The parser only supports shape (batch, -1) or (-1) for non-constant shape input.
                    else if (reshapeShapes[0] > 2)
                    {
                        throw ParseException(fmt::format("Invalid input shape '{}' in Reshape layer '{}' {}. "
                                                         "When inferring during runtime, the parser only supports "
                                                         "shape (batch, -1) or (-1) for target shape input.",
                                                         reshapeShapes[0],
                                                         layerName,
                                                         CHECK_LOCATION().AsString()));
                    }
                    else
                    {
                        const int32_t numInputElements = inputTensorInfo.GetNumElements();
                        const int32_t inputTensorShape = inputTensorInfo.GetShape()[0];
                        if (reshapeShapes[0] == 1)
                        {
                            // (-1): flatten to a single dimension.
                            targetShape = {numInputElements};
                        }
                        else if (reshapeShapes[0] == 2)
                        {
                            // (batch, -1): keep the leading dim, fold the rest.
                            targetShape = {inputTensorShape, numInputElements / inputTensorShape};
                        }
                    }
                }
                catch (const std::exception& exc)
                {
                    ARMNN_THROW_PARSE_EXCEPTION("Failed attempt to infer during runtime the target shape input for "
                                                "Reshape operation. Reshape operator target shape input buffer data "
                                                "is null. " << exc.what());
                }
            }
        }
        else
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }
    }

    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    // The output shape can be provided to us in 2 ways:
    // 1. through the normal 'shape' parameter given by outputs[indx]->shape
    // 2. through additional parameter 'shape_signature' given by outputs[indx]->buffer.
    // This parameter can sometimes contain -1 value not visible in the 'shape' parameter.
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        // Attempt to extract output shape from secondary 'shape_signature'
        // parameter and try to CheckShape() with this param.
        std::vector<int32_t> secondaryOutputTargetShape = outputs[0]->shape_signature;

        // if outputs[0]->shape_signature contain a -1 value, we need to compute its actual value
        // from reshape input in order to correctly verify reshape parameters equal output shape
        armnn::TensorInfo secondaryReshapeOutputTensorInfo =
            TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, secondaryOutputTargetShape);

        if (!CheckShape(reshapeOutputTensorShape, secondaryReshapeOutputTensorInfo.GetShape()))
        {
            std::stringstream ss;
            ss << "New shape defined in reshape parameters "
               << reshapeOutputTensorShape
               << " does not equal output shape "
               << actualOutputTensorInfo.GetShape()
               << ": "
               << CHECK_LOCATION().AsString();
            throw ParseException(ss.str());
        }
    }
    auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
    // Record the resolved output info so downstream operators can look it up.
    m_TensorInfos[outputTensorIds[0]] = reshapeOutputTensorInfo;

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3446
Kevin May7d96b162021-02-03 17:38:41 +00003447void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003448{
Sadik Armagana3b31f02019-12-05 09:08:53 +00003449 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
3450}
3451
Kevin May7d96b162021-02-03 17:38:41 +00003452void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagana3b31f02019-12-05 09:08:53 +00003453{
3454 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
3455}
3456
/// Shared handler for TFLite resize operators. Input 1 is a constant 2-element
/// int32 tensor holding the target (height, width); the resize mode selects
/// which built-in options (e.g. align_corners for bilinear) are read.
/// @param resizeMethod Bilinear or NearestNeighbor.
/// @throws ParseException on an unhandled resize method.
/// @throws NullPointerException if layer creation fails.
void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Data for the parsed tensor args (size) must be stored locally.
    std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());

    BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());

    // Target size order in the TFLite tensor is (height, width).
    ResizeDescriptor desc;
    desc.m_Method = resizeMethod;
    desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
    desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto layerName = fmt::format("Resize:");

    switch (resizeMethod)
    {
        case ResizeMethod::Bilinear:
        {
            layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);

            // Only bilinear resize carries the align_corners option.
            const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
            const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();

            desc.m_AlignCorners = options->align_corners;
            break;
        }
        case ResizeMethod::NearestNeighbor:
        {
            layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
                            static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
        }
    }

    TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
3528
Tianle Chenge5a30ff2023-07-03 11:24:12 +01003529void TfLiteParserImpl::ParseReverseV2(size_t subgraphIndex, size_t operatorIndex)
3530{
3531 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3532
3533 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3534 CHECK_VALID_SIZE(inputs.size(), 2);
3535
3536 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3537 CHECK_VALID_SIZE(outputs.size(), 1);
3538
3539 auto layerName = fmt::format("ReverseV2:{}:{}", subgraphIndex, operatorIndex);
3540
3541 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3542 TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
3543 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3544
Tracy Narinebb8d7592023-07-13 16:50:54 +01003545 IConnectableLayer* layer = m_Network->AddReverseV2Layer(layerName.c_str());
Tianle Chenge5a30ff2023-07-03 11:24:12 +01003546 ARMNN_ASSERT(layer != nullptr);
3547
3548 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3549
3550 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Tracy Narinebb8d7592023-07-13 16:50:54 +01003551 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Tianle Chenge5a30ff2023-07-03 11:24:12 +01003552
3553 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3554 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3555}
3556
Teresa Charlin777008b2023-07-26 10:07:55 +01003557void TfLiteParserImpl::ParseTile(size_t subgraphIndex, size_t operatorIndex)
3558{
3559 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3560
3561 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3562 CHECK_VALID_SIZE(inputs.size(), 2);
3563
3564 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3565 CHECK_VALID_SIZE(outputs.size(), 1);
3566
3567 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3568 TensorInfo multiplesTensorInfo = ToTensorInfo(inputs[1]);
3569 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3570
3571 auto layerName = fmt::format("Tile:{}:{}", subgraphIndex, operatorIndex);
3572
3573 TileDescriptor descriptor;
3574
3575 BufferRawPtr multiplesBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3576 if (multiplesBufferPtr != nullptr)
3577 {
3578 std::vector<int32_t> multiplesData(multiplesTensorInfo.GetNumElements());
3579 ::memcpy(multiplesData.data(), multiplesBufferPtr->data.data(), multiplesTensorInfo.GetNumBytes());
3580 descriptor.m_Multiples.assign(multiplesData.begin(), multiplesData.end());
3581 }
3582 else
3583 {
3584 ARMNN_THROW_PARSE_EXCEPTION("For Tile layer, Multiples data was not found in the buffer.");
3585 }
3586
3587 IConnectableLayer* layer = m_Network->AddTileLayer(descriptor, layerName.c_str());
3588 ARMNN_ASSERT(layer != nullptr);
3589
3590 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3591
3592 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3593 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3594
3595 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3596 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3597}
3598
Kevin May7d96b162021-02-03 17:38:41 +00003599void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan479045b2018-10-01 11:51:37 +01003600{
3601 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3602
Mike Kelly0d77ae12022-01-07 17:42:27 +00003603 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3604 const auto* options = operatorPtr->builtin_options.AsConcatenationOptions();
Sadik Armagan479045b2018-10-01 11:51:37 +01003605
3606 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
3607
3608 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3609 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Mike Kelly377fb212023-01-10 15:55:28 +00003610 auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
3611
Sadik Armagan479045b2018-10-01 11:51:37 +01003612 CHECK_VALID_SIZE(outputs.size(), 1);
3613
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003614 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
Mike Kelly377fb212023-01-10 15:55:28 +00003615 uint32_t inputRank = InputTensorInfo(subgraphIndex, operatorIndex, 0).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01003616
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003617 const unsigned int concatDimInput = static_cast<unsigned int>(
Mike Kelly377fb212023-01-10 15:55:28 +00003618 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01003619
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003620 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
3621 concatDescriptor.SetConcatAxis(concatDimInput);
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003622 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01003623
3624 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
3625 {
Mike Kelly377fb212023-01-10 15:55:28 +00003626 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, viewIndex);
Sadik Armagan479045b2018-10-01 11:51:37 +01003627
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003628 // This set up concatDescriptor view origin
3629 armnnUtils::ProcessConcatInputTensorInfo(
Mike Kelly377fb212023-01-10 15:55:28 +00003630 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01003631 }
3632
James Ward58dec6b2020-09-11 17:32:44 +01003633 auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01003634
Jim Flynn906f9462019-05-10 13:55:21 +01003635 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01003636
3637 if (!layer)
3638 {
3639 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3640 operatorIndex, CHECK_LOCATION().AsString()));
3641 }
3642
Mike Kelly377fb212023-01-10 15:55:28 +00003643 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003644 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01003645
James Conroy05102392020-06-24 15:39:55 +01003646 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003647 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01003648
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003649 // add fused activation layer
3650 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01003651
Sadik Armagan479045b2018-10-01 11:51:37 +01003652 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3653 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3654}
3655
/// Parses a TfLite FULLY_CONNECTED operator into an ArmNN FullyConnected layer.
/// Weights (input 1) and optional bias (input 2) are registered as input slots and
/// materialised later by SetupConstantLayers when constant. Inputs with rank > 2 are
/// flattened to 2D via a prepended Reshape layer; outputs with rank > 2 get an
/// appended Reshape ("ExpandDims") back to the original output shape.
void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = false;
    // TfLite stores FC weights as [output_size, input_size], so ArmNN must transpose.
    desc.m_TransposeWeightMatrix = true;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Fully Connected Layer accepts two dimensional weights input
    int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
    if (weightsDimension != 2)
    {
        throw ParseException(
            fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
                        "Node {}",
                        weightsDimension,
                        CHECK_LOCATION().AsString()));
    }

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input tensor to the registration list
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    desc.m_ConstantWeights = IsConstTensor(inputs[1]);

    // Add the weights input to the registration list, constant layers will be added by SetupConstantLayers if constant.
    tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);

    // If weights are stored quantised but the network runs float (or vice versa),
    // queue the buffer for dequantisation.
    if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
    {
        m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
    }

    // A third input, when present, is the bias.
    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        // Add the biases input to the registration list, constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);

        if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
        {
            m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
        }
    }

    // Filters and biases are always passed to fully connected as inputs
    layer = m_Network->AddFullyConnectedLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    unsigned int startingSlotIndex = 0;
    if (inputTensorInfo.GetNumDimensions() > 2)
    {
        // Add reshape to flatten to 2D [batch_size, input_size],
        // where "input_size" corresponds to the number of inputs to the layer,
        // matching the second dimension of weights,
        // and "batch_size" is calculated by dividing the number of elements by "input_size".
        std::vector<unsigned int> reshapedDimensions(2);
        reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
        reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];

        if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
        {
            throw ParseException(
                fmt::format("Failed to deduce input tensor shape from filter size {} {}",
                            reshapedDimensions[1],
                            CHECK_LOCATION().AsString()));
        }

        armnn::TensorInfo reshapedTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
        reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
        // From here on the FC layer's effective input is the flattened 2D tensor.
        inputTensorInfo = reshapedTensorInfo;

        std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor,
                                                                            reshapeLayerName.c_str());

        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
        reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

        RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
        // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
        tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
        startingSlotIndex = 1;
    }

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);

    // Infer the output shape from the (possibly flattened) input and filter shapes.
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromShapes(subgraphIndex, operatorIndex, layer, 0,
                                                                    { inputTensorInfo.GetShape(),
                                                                      filterTensorInfo.GetShape() });

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    if (outputTensorInfo.GetNumDimensions() > 2)
    {
        // Calculate reshape to flatten to 2D [batch_size, input_size]
        std::vector<unsigned int> reshapedDimensions(2);
        reshapedDimensions[1] = filterTensorInfo.GetShape()[0];
        reshapedDimensions[0] = outputTensorInfo.GetNumElements() / reshapedDimensions[1];
        armnn::TensorInfo reshapedOutputTensorInfo = outputTensorInfo;
        if (outputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
        {
            throw ParseException(
                fmt::format("Failed to deduce output tensor shape from filter size {} {}",
                            reshapedDimensions[1],
                            CHECK_LOCATION().AsString()));
        }
        reshapedOutputTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
        layer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);

        // Restore the original >2D output shape after the FC layer; note `layer`
        // now refers to the appended reshape layer.
        std::string reshapeLayerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
        layer = AddReshapeLayer(layer, 0, reshapeLayerName, outputTensorInfo);
    }

    // we need to add the activation layer and fortunately we don't need to care about the data layout
    armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
                                                                             options->fused_activation_function);

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});

    // Cache the final (pre-activation chain) tensor info for downstream shape inference.
    m_TensorInfos[outputTensorIndexes[0]] = layer->GetOutputSlot(0).GetTensorInfo();
}
3804
Kevin May7d96b162021-02-03 17:38:41 +00003805void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
keidav011b3e2ea2019-02-21 10:07:37 +00003806{
3807 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3808
Mike Kelly0d77ae12022-01-07 17:42:27 +00003809 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
keidav011b3e2ea2019-02-21 10:07:37 +00003810
3811 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3812 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3813 CHECK_VALID_SIZE(outputs.size(), 4);
3814
3815 // Obtain custom options from flexbuffers
3816 auto custom_options = operatorPtr->custom_options;
3817 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
3818
3819 // Obtain descriptor information from tf lite
3820 DetectionPostProcessDescriptor desc;
3821 desc.m_MaxDetections = m["max_detections"].AsUInt32();
3822 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
3823 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
3824 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
3825 desc.m_NumClasses = m["num_classes"].AsUInt32();
3826 desc.m_ScaleH = m["h_scale"].AsFloat();
3827 desc.m_ScaleW = m["w_scale"].AsFloat();
3828 desc.m_ScaleX = m["x_scale"].AsFloat();
3829 desc.m_ScaleY = m["y_scale"].AsFloat();
3830
keidav0107d58c72019-02-26 11:57:39 +00003831 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00003832 {
keidav0107d58c72019-02-26 11:57:39 +00003833 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00003834 }
3835 if (!(m["detections_per_class"].IsNull()))
3836 {
3837 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
3838 }
3839
3840 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
3841 {
3842 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
3843 "must be positive and less than or equal to 1.");
3844 }
3845
Mike Kelly377fb212023-01-10 15:55:28 +00003846 armnn::TensorInfo anchorTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003847 auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);
keidav011b3e2ea2019-02-21 10:07:37 +00003848
James Ward58dec6b2020-09-11 17:32:44 +01003849 auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003850 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
keidav011b3e2ea2019-02-21 10:07:37 +00003851 layerName.c_str());
3852
Ryan OSheac229b3f2023-06-27 22:34:54 +01003853 if (!layer)
3854 {
3855 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3856 operatorIndex, CHECK_LOCATION().AsString()));
3857 }
keidav011b3e2ea2019-02-21 10:07:37 +00003858
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003859 // The model does not specify the output shapes.
3860 // The output shapes are calculated from the max_detection and max_classes_per_detection.
3861 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
Mike Kelly377fb212023-01-10 15:55:28 +00003862 m_OverriddenOutputShapes.push_back({ 1, numDetectedBox, 4 });
3863 m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
3864 m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
3865 m_OverriddenOutputShapes.push_back({ 1 });
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003866
keidav011b3e2ea2019-02-21 10:07:37 +00003867 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
3868 {
Mike Kelly377fb212023-01-10 15:55:28 +00003869 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverriddenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00003870 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
3871 }
3872
3873 // Register the input connection slots for the layer, connections are made after all layers have been created
3874 // only the tensors for the inputs are relevant, exclude the const tensors
3875 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3876 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
3877
3878 // Register the output connection slots for the layer, connections are made after all layers have been created
3879 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3880 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
3881 outputTensorIndexes[1],
3882 outputTensorIndexes[2],
3883 outputTensorIndexes[3]});
3884}
3885
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003886/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00003887void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003888{
3889 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3890
3891 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3892 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3893 CHECK_VALID_SIZE(outputs.size(), 1);
3894
3895 if (inputs.size() < 1)
3896 {
3897 throw ParseException("Pack must have at least one input.");
3898 }
3899
3900 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3901 const auto* options = operatorPtr->builtin_options.AsPackOptions();
3902
3903 StackDescriptor desc;
3904 desc.m_Axis = static_cast<uint32_t>(options->axis);
3905 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
3906
3907 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00003908 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003909 desc.m_InputShape = inputTensorInfo.GetShape();
3910
James Ward58dec6b2020-09-11 17:32:44 +01003911 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003912 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
3913
Ryan OSheac229b3f2023-06-27 22:34:54 +01003914 if (!layer)
3915 {
3916 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3917 operatorIndex, CHECK_LOCATION().AsString()));
3918 }
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003919
Mike Kelly377fb212023-01-10 15:55:28 +00003920 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003921 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3922
3923 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3924 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
3925
3926 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3927 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3928}
3929
Mike Kelly5880b912022-01-28 16:18:54 +00003930void TfLiteParserImpl::ParseUnidirectionalSequenceLSTM(size_t subgraphIndex, size_t operatorIndex)
3931{
3932 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3933
3934 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3935 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3936
3937 if (inputs.size() < 2)
3938 {
3939 throw ParseException("UnidirectionalSequenceLSTM must have at least 2 input.");
3940 }
3941
3942 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3943 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
3944 const auto nodeParams = operatorPtr->builtin_options.AsUnidirectionalSequenceLSTMOptions();
3945 CHECK_SUPPORTED_FUSED_ACTIVATION(nodeParams, subgraphIndex, operatorIndex);
Mike Kelly377fb212023-01-10 15:55:28 +00003946 auto inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly5880b912022-01-28 16:18:54 +00003947 auto outputTensorInfo = ToTensorInfo(outputs[0]);
3948
3949 // Set the params structure for the AddUnidirectionalSequenceLstmLayer call
3950 // Please refer to each operand at
3951 // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop
3952 armnn::LstmInputParams params;
3953
3954 if (IsOptionalOperandPresent(operatorPtr->inputs[1]))
3955 {
3956 params.m_InputToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[1]].get(),
3957 inputTensorInfo).first;
3958 }
3959
3960 params.m_InputToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[2]].get(),
3961 inputTensorInfo).first;
3962 params.m_InputToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[3]].get(),
3963 inputTensorInfo).first;
3964 params.m_InputToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[4]].get(),
3965 inputTensorInfo).first;
3966
3967 // Recurrent weight tensors of size {n_cell, n_output}
3968 if (IsOptionalOperandPresent(operatorPtr->inputs[5]))
3969 {
3970 params.m_RecurrentToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[5]].get(),
3971 inputTensorInfo).first;
3972 }
3973
3974 params.m_RecurrentToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[6]].get(),
3975 inputTensorInfo).first;
3976 params.m_RecurrentToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[7]].get(),
3977 inputTensorInfo).first;
3978 params.m_RecurrentToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[8]].get(),
3979 inputTensorInfo).first;
3980
3981 // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
3982 if (IsOptionalOperandPresent(operatorPtr->inputs[9]))
3983 {
3984 params.m_CellToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[9]].get(),
3985 inputTensorInfo).first;
3986 }
3987
3988 if (IsOptionalOperandPresent(operatorPtr->inputs[10]))
3989 {
3990 params.m_CellToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[10]].get(),
3991 inputTensorInfo).first;
3992 }
3993
3994 if (IsOptionalOperandPresent(operatorPtr->inputs[11]))
3995 {
3996 params.m_CellToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[11]].get(),
3997 inputTensorInfo).first;
3998 }
3999
4000 // Gates bias tensors of size {n_cell}
4001 if (IsOptionalOperandPresent(operatorPtr->inputs[12]))
4002 {
4003 params.m_InputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[12]].get(),
4004 inputTensorInfo).first;
4005 }
4006
4007 params.m_ForgetGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[13]].get(),
4008 inputTensorInfo).first;
4009 params.m_CellBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[14]].get(),
4010 inputTensorInfo).first;
4011 params.m_OutputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[15]].get(),
4012 inputTensorInfo).first;
4013
4014 // Projection weight tensor of size {n_output, n_cell}
4015 if (IsOptionalOperandPresent(operatorPtr->inputs[16]))
4016 {
4017 params.m_ProjectionWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[16]].get(),
4018 inputTensorInfo).first;
4019 }
4020 // Projection bias tensor of size {n_output}
4021 if (IsOptionalOperandPresent(operatorPtr->inputs[17]))
4022 {
4023 params.m_ProjectionBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[17]].get(),
4024 inputTensorInfo).first;
4025 }
4026
4027 // These state tensors are defined as variable tensors, and will be modified by this op.
4028 armnn::TensorInfo outputStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[18]].get());
4029 m_ConstantsToBeCreated.push_back(operatorPtr->inputs[18]);
4030 armnn::TensorInfo cellStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[19]].get());
4031 m_ConstantsToBeCreated.push_back(operatorPtr->inputs[19]);
4032
4033 // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
4034 if (inputs.size() >= 21 && IsOptionalOperandPresent(operatorPtr->inputs[20]))
4035 {
4036 params.m_InputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[20]].get(),
4037 inputTensorInfo).first;
4038 }
4039
4040 if (inputs.size() >= 22 && IsOptionalOperandPresent(operatorPtr->inputs[21]))
4041 {
4042 params.m_ForgetLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[21]].get(),
4043 inputTensorInfo).first;
4044 }
4045
4046 if (inputs.size() >= 23 && IsOptionalOperandPresent(operatorPtr->inputs[22]))
4047 {
4048 params.m_CellLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[22]].get(),
4049 inputTensorInfo).first;
4050 }
4051
4052 if (inputs.size() >= 24 && IsOptionalOperandPresent(operatorPtr->inputs[23]))
4053 {
4054 params.m_OutputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[23]].get(),
4055 inputTensorInfo).first;
4056 }
4057
4058 // set the layer descriptor
4059 armnn::UnidirectionalSequenceLstmDescriptor desc;
4060 desc.m_ActivationFunc = nodeParams->fused_activation_function;
4061 desc.m_ClippingThresCell = nodeParams->cell_clip;
4062 desc.m_ClippingThresProj = nodeParams->proj_clip;
4063 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
4064 || params.m_RecurrentToInputWeights == nullptr
4065 || params.m_InputGateBias == nullptr);
4066 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
4067 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
4068 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
4069 || params.m_ForgetLayerNormWeights != nullptr
4070 || params.m_CellLayerNormWeights != nullptr
4071 || params.m_OutputLayerNormWeights != nullptr);
4072 desc.m_TimeMajor = nodeParams->time_major;
4073
Mike Kellyc0800a32022-06-15 10:57:52 +01004074 if (operatorPtr->intermediates.size() > 3 && desc.m_LayerNormEnabled)
Mike Kelly5880b912022-01-28 16:18:54 +00004075 {
4076 auto inputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[0]].get(),
4077 inputTensorInfo).first;
4078 auto inputIntermediateTensorInfo = inputIntermediate->GetInfo();
4079 desc.m_InputIntermediateScale = inputIntermediateTensorInfo.GetQuantizationScale();
4080
4081 auto forgetIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[1]].get(),
4082 inputTensorInfo).first;
4083 auto forgetIntermediateTensorInfo = forgetIntermediate->GetInfo();
4084 desc.m_ForgetIntermediateScale = forgetIntermediateTensorInfo.GetQuantizationScale();
4085
4086 auto cellIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[2]].get(),
4087 inputTensorInfo).first;
4088 auto cellIntermediateTensorInfo = cellIntermediate->GetInfo();
4089 desc.m_CellIntermediateScale = cellIntermediateTensorInfo.GetQuantizationScale();
4090
4091 auto outputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[3]].get(),
4092 inputTensorInfo).first;
4093 auto outputIntermediateTensorInfo = outputIntermediate->GetInfo();
4094 desc.m_OutputIntermediateScale = outputIntermediateTensorInfo.GetQuantizationScale();
4095 }
4096 else
4097 {
4098 float defaultIntermediate = std::pow(2, -12);
4099 desc.m_InputIntermediateScale = defaultIntermediate;
4100 desc.m_ForgetIntermediateScale = defaultIntermediate;
4101 desc.m_CellIntermediateScale = defaultIntermediate;
4102 desc.m_OutputIntermediateScale = defaultIntermediate;
4103 }
4104
Mike Kellyc0800a32022-06-15 10:57:52 +01004105 if (operatorPtr->intermediates.size() > 4)
4106 {
4107 auto hiddentensor = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[4]].get(),
4108 inputTensorInfo).first;
Mike Kelly5880b912022-01-28 16:18:54 +00004109
Mike Kellyc0800a32022-06-15 10:57:52 +01004110 desc.m_HiddenStateScale = hiddentensor->GetInfo().GetQuantizationScale();
4111 desc.m_HiddenStateZeroPoint = hiddentensor->GetInfo().GetQuantizationOffset();
4112 }
Narumol Prangnawarat5f941242023-08-11 16:09:26 +01004113 unsigned int batchSize = desc.m_TimeMajor ? inputTensorInfo.GetShape()[1] : inputTensorInfo.GetShape()[0];
Mike Kelly5880b912022-01-28 16:18:54 +00004114 unsigned int outputSize = outputTensorInfo.GetShape()[2];
4115 unsigned int numUnits = cellStateInInfo.GetShape()[1];
4116
4117 armnn::DataType dataType = inputTensorInfo.GetDataType();
4118 float qScale = inputTensorInfo.GetQuantizationScale();
4119 float qOffset = inputTensorInfo.GetQuantizationOffset();
4120
4121 armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
4122 if (!desc.m_CifgEnabled)
4123 {
4124 scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
4125 }
4126 armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits},
4127 cellStateInInfo.GetDataType(),
4128 cellStateInInfo.GetQuantizationScale(),
4129 cellStateInInfo.GetQuantizationOffset());
4130 armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
4131
4132 armnn::LstmInputParamsInfo paramsInfo;
4133 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4134 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4135 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4136 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4137 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4138 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4139 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4140 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4141 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4142
4143 if (!desc.m_CifgEnabled)
4144 {
4145 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4146 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4147 if (params.m_CellToInputWeights != nullptr)
4148 {
4149 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
4150 }
4151 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4152 }
4153
4154 if (desc.m_ProjectionEnabled)
4155 {
4156 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
4157 if (params.m_ProjectionBias != nullptr)
4158 {
4159 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
4160 }
4161 }
4162
4163 if (desc.m_PeepholeEnabled)
4164 {
4165 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
4166 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
4167 }
4168
4169 if (desc.m_LayerNormEnabled)
4170 {
4171 if(!desc.m_CifgEnabled)
4172 {
4173 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
4174 }
4175 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
4176 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
4177 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
4178 }
4179
4180 auto layerName = fmt::format("UnidirectionalSequenceLSTM:{}:{}", subgraphIndex, operatorIndex);
4181 armnn::IConnectableLayer* layer = m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
Ryan OSheac229b3f2023-06-27 22:34:54 +01004182
4183 if (!layer)
4184 {
4185 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4186 operatorIndex, CHECK_LOCATION().AsString()));
4187 }
Mike Kelly5880b912022-01-28 16:18:54 +00004188
4189 // register the input connection slots for the layer, connections are made after all layers have been created
4190 // only the tensors for the inputs are relevant, exclude the const tensors
4191 auto inputTensorIndexes = AsUnsignedVector({operatorPtr->inputs[0],
4192 operatorPtr->inputs[18],
4193 operatorPtr->inputs[19]});
4194 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0],
4195 inputTensorIndexes[1],
4196 inputTensorIndexes[2]});
4197
4198 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4199
4200 layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
4201 layer->GetOutputSlot(1).SetTensorInfo(cellStateOutTensorInfo);
4202 layer->GetOutputSlot(2).SetTensorInfo(outputTensorInfo);
4203
4204 unsigned int tensorIndex = outputTensorIndexes[0];
4205 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(2));
4206 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
4207}
4208
Kevin May7d96b162021-02-03 17:38:41 +00004209void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01004210{
4211 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4212
Mike Kelly0d77ae12022-01-07 17:42:27 +00004213 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4214 const auto* options = operatorPtr->builtin_options.AsUnpackOptions();
Nina Drozd200e3802019-04-15 09:47:39 +01004215
4216 // This unpackAxis indicates the axis to unpack
4217 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
4218
4219 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4220 CHECK_VALID_SIZE(inputs.size(), 1);
4221
Mike Kelly377fb212023-01-10 15:55:28 +00004222 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004223
4224 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
4225 {
4226 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004227 fmt::format("The unpack axis: {} cannot be greater than or equal to "
4228 "the number of input dimension {} {}",
4229 unpackAxis,
4230 inputTensorInfo.GetNumDimensions(),
4231 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004232 }
4233
Nina Drozd200e3802019-04-15 09:47:39 +01004234 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
4235 // If num is not defined, automatically infer from the length of the dimension axis.
4236 if(unpackNum == 0)
4237 {
4238 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
4239 }
4240
4241 // If unpack number cannot be inferred and is still zero, throw ParseException.
4242 if(unpackNum == 0)
4243 {
4244 throw ParseException("Number to unpack must greater than zero.");
4245 }
4246
4247 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4248 CHECK_VALID_SIZE(outputs.size(), unpackNum);
4249
4250 auto inputDimSize = inputTensorInfo.GetNumDimensions();
4251 std::vector<unsigned int> unpackDimSizes(inputDimSize);
4252
4253 // Add current input shape to unpackDimSizes
4254 for (unsigned int i = 0; i < inputDimSize; ++i)
4255 {
4256 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
4257 }
4258
4259 if (unpackDimSizes[unpackAxis] != unpackNum)
4260 {
4261 throw ParseException("Number to unpack must be the same as length of the dimension to "
4262 "unpack along.");
4263 }
4264
4265 unpackDimSizes[unpackAxis] /= unpackNum;
4266
4267 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
4268 for (unsigned int j = 0; j < unpackNum; ++j)
4269 {
4270 // Set the size of the views.
4271 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
4272 {
4273 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
4274 }
4275 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
4276 }
Nikhil Raj Arm008270f2023-08-11 08:55:21 +00004277
James Ward58dec6b2020-09-11 17:32:44 +01004278 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01004279 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01004280
4281 if (!layer)
4282 {
4283 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4284 operatorIndex, CHECK_LOCATION().AsString()));
4285 }
Nina Drozd200e3802019-04-15 09:47:39 +01004286
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004287 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
4288 unpackDimSizes.data());
4289
Nina Drozd200e3802019-04-15 09:47:39 +01004290 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4291 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4292
Finn Williamsb49ed182021-06-29 15:50:08 +01004293 std::vector<unsigned int> reshapeDims;
4294 for (unsigned int axis = 0; axis < splitOutShape.GetNumDimensions(); ++axis)
4295 {
4296 if (axis != unpackAxis)
4297 {
4298 reshapeDims.push_back(splitOutShape[axis]);
4299 }
4300 }
4301
4302 TensorShape reshapeOutputShape(splitOutShape.GetNumDimensions() -1, reshapeDims.data());
4303
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004304 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
4305 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4306 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01004307 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01004308 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004309 armnn::ReshapeDescriptor desc;
Finn Williamsb49ed182021-06-29 15:50:08 +01004310 desc.m_TargetShape = reshapeOutputShape;
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004311 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
4312
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01004313 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
4314 outputTensorInfo.GetDataType(),
4315 outputTensorInfo.GetQuantizationScale(),
4316 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004317 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
4318
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01004319 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004320
4321 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
4322 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
4323 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
4324 }
Nina Drozd200e3802019-04-15 09:47:39 +01004325}
4326
/// Parses a TfLite SPLIT operator into an Arm NN Splitter layer.
///
/// SPLIT divides one tensor into `num_splits` equal pieces along an axis.
/// TfLite input layout: input 0 is the scalar axis tensor, input 1 is the
/// tensor to split (note the order — it is the reverse of SPLIT_V).
///
/// @param subgraphIndex  Index of the subgraph containing the operator.
/// @param operatorIndex  Index of the operator within the subgraph.
/// @throws ParseException      on zero splits, unreadable/invalid axis,
///                             too many dimensions, or a dimension not evenly
///                             divisible by num_splits.
/// @throws NullPointerException if layer creation fails.
void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsSplitOptions();

    const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);

    // If number of splits cannot be inferred and is zero, throw ParseException.
    if(numSplits == 0)
    {
        throw ParseException("Number to splits must greater than zero.");
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), numSplits);

    // Input 1 is the data tensor; input 0 is the axis tensor.
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    if (axisTensorInfo.GetNumElements() != 1)
    {
        throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
                                         CHECK_LOCATION().AsString()));
    }

    // The axis must be a constant stored in the model buffers.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData[0];

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    // Normalise a possibly negative axis into [0, rank).
    const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        throw ParseException(
            fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
                        inputTensorInfo.GetNumDimensions(),
                        MaxNumOfTensorDimensions,
                        CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    // Each view takes an equal share of the split dimension.
    splitterDimSizes[splitDim] /= numSplits;

    SplitterDescriptor splitDesc(numSplits, inputDimSize);
    for (unsigned int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        // Views are laid out back-to-back along the split dimension.
        splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

    auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Only the data tensor (index 1) is wired as a layer input; the axis is a
    // constant already consumed above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
        layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
4438
Derek Lambertif0176992020-04-28 13:37:49 +01004439unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
4440{
4441 int numDims = armnn::numeric_cast<int>(numDimsIn);
4442 int v = idx < 0 ? numDims + idx : idx;
Ryan OSheac229b3f2023-06-27 22:34:54 +01004443
4444 if (v < 0 || v > numDims)
4445 {
4446 throw ParseException(fmt::format("Unable to compute index {}", CHECK_LOCATION().AsString()));
4447 }
Derek Lambertif0176992020-04-28 13:37:49 +01004448
4449 return static_cast<unsigned int>(v);
4450}
4451
Kevin May7d96b162021-02-03 17:38:41 +00004452void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01004453{
4454 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4455
Mike Kelly0d77ae12022-01-07 17:42:27 +00004456 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4457 const auto* options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01004458
4459 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4460 CHECK_VALID_SIZE(inputs.size(), 3);
4461
4462 auto& inputTensor = inputs[0];
4463 auto& splitsTensor = inputs[1];
4464 auto& axisTensor = inputs[2];
4465
4466 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
4467 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
4468 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
Ryan OSheac229b3f2023-06-27 22:34:54 +01004469
4470 if (axisTensorInfo.GetNumElements() != 1)
4471 {
4472 throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
4473 CHECK_LOCATION().AsString()));
4474 }
Derek Lambertif0176992020-04-28 13:37:49 +01004475
4476 // Inputs
4477 auto inputDimSize = inputTensorInfo.GetNumDimensions();
4478 if (inputDimSize > MaxNumOfTensorDimensions)
4479 {
4480 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004481 fmt::format("The number of dimensions: {} for input tensors of the "
4482 "SplitV op cannot be greater than {} {}",
4483 inputTensorInfo.GetNumDimensions(),
4484 MaxNumOfTensorDimensions,
4485 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01004486 }
4487
4488 // Get split axis
4489 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004490 if (axisBufferPtr == nullptr)
4491 {
4492 throw ParseException(
4493 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
4494 CHECK_LOCATION().AsString()));
4495 }
4496
Derek Lambertif0176992020-04-28 13:37:49 +01004497 std::vector<int> axisData(axisTensorInfo.GetNumElements());
4498 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004499 int32_t axis = axisData[0];
4500
4501 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
4502 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
4503 {
4504 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
4505 // E.g. Rank 4 tensor can have axis in range [-4, 3)
4506 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
4507 throw ParseException(
4508 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
4509 axis,
4510 CHECK_LOCATION().AsString()));
4511 }
4512 const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
Derek Lambertif0176992020-04-28 13:37:49 +01004513
Derek Lambertif0176992020-04-28 13:37:49 +01004514 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01004515 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01004516 unsigned int numSplits{0};
4517
4518 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01004519 {
4520 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01004521 }
4522 else
4523 {
Ryan OShea86704732020-05-26 11:41:04 +01004524 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01004525 }
4526
4527 if (numSplits <=0)
4528 {
4529 throw ParseException("SplitV has invalid number of splits");
4530 }
4531
Jan Eilersc0761e92020-06-29 16:48:44 +01004532 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01004533 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01004534 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01004535
Jan Eilersc0761e92020-06-29 16:48:44 +01004536 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01004537 int numInferred{0};
4538 unsigned int inferIdx{0};
4539 int splitSum{0};
4540 for (auto split : splitsData)
4541 {
4542 if (split < 0)
4543 {
4544 numInferred++;
4545 inferIdx = idx;
4546 }
4547 else
4548 {
4549 splitSum += split;
4550 }
4551 idx++;
4552 }
4553 // Check for inferred Axis
4554 if (numInferred == 0)
4555 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004556 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01004557 {
4558 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
4559 }
4560 }
4561 else if (numInferred == 1)
4562 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004563 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01004564 }
4565 else
4566 {
4567 throw ParseException("Cannot infer split size for more than one split");
4568 }
4569
Derek Lambertif0176992020-04-28 13:37:49 +01004570 //Ouput size validation
4571 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4572 CHECK_VALID_SIZE(outputs.size(), numSplits);
4573
4574 // Setup Armnn descriptor
4575 SplitterDescriptor splitDesc(numSplits, inputDimSize);
4576 unsigned int accumSplit = 0;
4577 for (unsigned int j = 0; j < numSplits; ++j)
4578 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004579 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01004580
4581 // Set the size of the views.
4582 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
4583 {
4584 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
4585 if (dimIdx == splitDim)
4586 {
4587 dimSize = splitSize;
4588 }
4589 splitDesc.SetViewSize(j, dimIdx, dimSize);
4590 }
4591
4592 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
4593 accumSplit += splitSize;
4594 }
4595
James Ward58dec6b2020-09-11 17:32:44 +01004596 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01004597 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01004598
4599 if (!layer)
4600 {
4601 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4602 operatorIndex, CHECK_LOCATION().AsString()));
4603 }
Derek Lambertif0176992020-04-28 13:37:49 +01004604
4605 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4606 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4607
4608 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4609 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01004610 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01004611 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
4612 }
4613
4614 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4615 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4616}
4617
Matthew Sloyan28f177c2021-04-09 14:38:52 +01004618void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
4619{
4620 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
4621}
4622
Kevin May7d96b162021-02-03 17:38:41 +00004623void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
Inki Daed4619e22020-09-10 15:33:54 +09004624{
Matthew Sloyan28f177c2021-04-09 14:38:52 +01004625 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
4626}
4627
/// Shared implementation for the TfLite ARG_MIN and ARG_MAX operators.
///
/// Inputs: 0 = data tensor, 1 = scalar axis tensor (must be a constant in the
/// model buffers). Produces a single output holding the index of the min/max
/// element along the axis; the output type must be Signed32 or Signed64.
///
/// @param subgraphIndex     Index of the subgraph containing the operator.
/// @param operatorIndex     Index of the operator within the subgraph.
/// @param argMinMaxFunction Whether to build an ArgMin or an ArgMax layer.
/// @throws ParseException      on unsupported output type, unreadable axis
///                             buffer, or out-of-range axis.
/// @throws NullPointerException if layer creation fails.
void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    if (axisTensorInfo.GetNumElements() != 1)
    {
        throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
                                         CHECK_LOCATION().AsString()));
    }

    // Check if output tensor type is Signed32 or Signed64
    if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
        outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
    {
        throw ParseException(
            fmt::format(
                "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
                CHECK_LOCATION().AsString()));
    }

    // Get const axis value from model and set it to descriptor.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData.front();

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    ArgMinMaxDescriptor desc;
    desc.m_Axis = axis;
    desc.m_Function = argMinMaxFunction;

    // Register a ArgMin/ArgMax layer.
    auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
    auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
    IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Re-derive the output info from the layer's input (shape inference), then
    // attach it to the output slot.
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
4708
/// Parses a TfLite GATHER operator into an Arm NN Gather layer.
///
/// Inputs: 0 = params (data) tensor, 1 = indices tensor. The gather axis comes
/// from the operator's builtin GatherOptions. The output rank must equal
/// inputRank + indicesRank - 1.
///
/// @param subgraphIndex  Index of the subgraph containing the operator.
/// @param operatorIndex  Index of the operator within the subgraph.
/// @throws ParseException      on out-of-range axis or a mismatched output rank.
/// @throws NullPointerException if layer creation fails.
void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    armnn::GatherDescriptor gatherDescriptor;

    // The axis is carried in the builtin options, not in a tensor.
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsGatherOptions();
    auto axis = options->axis;

    auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
    auto outputDimensions = outputTensorInfo.GetNumDimensions();
    // Axis may be negative (wraps from the end) but must stay within [-n, n).
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        throw ParseException(
            fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
                        axis,
                        inputDimensions, inputDimensions,
                        CHECK_LOCATION().AsString()));
    }
    // Gather output rank is fixed by the input and indices ranks.
    if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
    {
        throw ParseException(
            fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
                        outputDimensions,
                        inputDimensions, indicesDimensions,
                        CHECK_LOCATION().AsString()));
    }

    gatherDescriptor.m_Axis = axis;

    IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output info is re-derived from both inputs (data and indices) before being
    // attached to the output slot.
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
4769
/// Parses a TfLite GATHER_ND operator into an Arm NN GatherNd layer.
///
/// Inputs: 0 = params (data) tensor, 1 = indices tensor. GatherNd takes no
/// descriptor/options; the layer is created with just a name.
///
/// @param subgraphIndex  Index of the subgraph containing the operator.
/// @param operatorIndex  Index of the operator within the subgraph.
/// @throws NullPointerException if layer creation fails.
void TfLiteParserImpl::ParseGatherNd(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // NOTE(review): these two infos are not read below; presumably InputTensorInfo
    // performs validation/shape-inference side effects — confirm before removing.
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    auto layerName = fmt::format("GatherNd:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddGatherNdLayer(layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output info is re-derived from both inputs (data and indices) before being
    // attached to the output slot.
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
4800
Kevin May7d96b162021-02-03 17:38:41 +00004801void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00004802{
4803 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4804
Kevin May7d96b162021-02-03 17:38:41 +00004805 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004806 CHECK_VALID_SIZE(inputs.size(), 1);
Kevin May7d96b162021-02-03 17:38:41 +00004807 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004808 CHECK_VALID_SIZE(outputs.size(), 1);
4809
4810 armnn::DepthToSpaceDescriptor descriptor;
4811
Mike Kelly0d77ae12022-01-07 17:42:27 +00004812 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4813 const auto* options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
Sadik Armagan26868492021-01-22 14:25:31 +00004814 auto blockSize = options->block_size;
4815 if (blockSize < 2)
4816 {
4817 throw ParseException(
4818 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
4819 blockSize,
4820 CHECK_LOCATION().AsString()));
4821 }
4822 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
4823
4824 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
4825 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01004826
4827 if (!layer)
4828 {
4829 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4830 operatorIndex, CHECK_LOCATION().AsString()));
4831 }
4832
Mike Kelly377fb212023-01-10 15:55:28 +00004833 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan26868492021-01-22 14:25:31 +00004834 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4835
4836 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4837 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4838
4839 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4840 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4841}
4842
// Handles the TfLite SUM operator by delegating to the shared reduce parser.
void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
}
4847
// Handles the TfLite REDUCE_PROD operator by delegating to the shared reduce parser.
void TfLiteParserImpl::ParseReduceProd(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Prod);
}
4852
// Handles the TfLite REDUCE_MAX operator by delegating to the shared reduce parser.
void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
}
4857
// Handles the TfLite REDUCE_MIN operator by delegating to the shared reduce parser.
void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
}
4862
4863void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
4864{
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004865 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4866
Mike Kelly0d77ae12022-01-07 17:42:27 +00004867 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4868 const auto* options = operatorPtr->builtin_options.AsReducerOptions();
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004869
4870 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4871 CHECK_VALID_SIZE(inputs.size(), 2);
4872
4873 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4874 CHECK_VALID_SIZE(outputs.size(), 1);
4875
Sadik Armagana2747482021-02-09 10:28:54 +00004876 auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004877
Mike Kelly377fb212023-01-10 15:55:28 +00004878 armnn::TensorInfo inputTensorInfo0 = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4879 armnn::TensorInfo inputTensorInfo1 = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004880
4881 ReduceDescriptor desc;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004882 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
4883 // Get const axis value from model and set it to descriptor.
4884 if (axisBufferPtr != nullptr)
4885 {
Sadik Armagan49bdb792021-02-11 13:57:07 +00004886 std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
4887 ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());
4888
4889 // Convert the axis to unsigned int and remove duplicates.
4890 auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
4891 std::set<unsigned int> uniqueAxis;
4892 std::transform(axisData.begin(),
4893 axisData.end(),
4894 std::inserter(uniqueAxis, uniqueAxis.begin()),
4895 [rank](int i)->unsigned int{
4896 return static_cast<uint32_t>(((i + rank) % rank)); });
4897 desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004898 }
Sadik Armagana2747482021-02-09 10:28:54 +00004899 else
4900 {
4901 for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
4902 {
4903 desc.m_vAxis.push_back(i);
4904 }
4905 }
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004906
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004907 desc.m_KeepDims = options->keep_dims;
Sadik Armagana2747482021-02-09 10:28:54 +00004908 desc.m_ReduceOperation = reduceOperation;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004909
4910 // Register a new layer object, Sum.
Mike Kelly0d77ae12022-01-07 17:42:27 +00004911 IConnectableLayer* layer = m_Network->AddReduceLayer(desc, layerName.c_str());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004912
Mike Kelly377fb212023-01-10 15:55:28 +00004913 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004914 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4915
4916 // Register input tensor to the layer.
4917 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4918 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4919
4920 // Register output tensor to the layer.
4921 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4922 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4923}
4924
Mike Kelly31dce2b2021-09-01 21:22:37 +01004925void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex)
4926{
4927 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4928
4929 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4930 CHECK_VALID_SIZE(inputs.size(), 1);
4931
4932 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4933 CHECK_VALID_SIZE(outputs.size(), 1);
4934
4935 auto layerName = fmt::format("LRN:{}:{}", subgraphIndex, operatorIndex);
4936 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4937
Mike Kelly377fb212023-01-10 15:55:28 +00004938 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly31dce2b2021-09-01 21:22:37 +01004939
4940 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4941 const auto* options = operatorPtr->builtin_options.AsLocalResponseNormalizationOptions();
4942
4943 armnn::NormalizationDescriptor descriptor;
4944 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4945 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
4946 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
4947 descriptor.m_NormSize = static_cast<uint32_t>(options->radius);
4948 descriptor.m_K = options->bias;
4949 descriptor.m_Alpha = options->alpha;
4950 descriptor.m_Beta = options->beta;
4951
4952 // ArmNN expects normSize to be the full size of the normalization
4953 // window rather than the radius as in TfLite.
4954 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
4955
4956 IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01004957
4958 if (!layer)
4959 {
4960 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4961 operatorIndex, CHECK_LOCATION().AsString()));
4962 }
Mike Kelly31dce2b2021-09-01 21:22:37 +01004963
Mike Kelly377fb212023-01-10 15:55:28 +00004964 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Mike Kelly31dce2b2021-09-01 21:22:37 +01004965 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4966
4967 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4968 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4969
4970 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4971 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4972}
4973
// Handles the TfLite ABS operator via the shared unary elementwise parser.
void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
}
4978
// Handles the TfLite CEIL operator via the shared unary elementwise parser.
void TfLiteParserImpl::ParseCeil(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Ceil);
}
4983
// Handles the TfLite EXP operator via the shared unary elementwise parser.
void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
}
4988
// Handles the TfLite LOG operator via the shared unary elementwise parser.
void TfLiteParserImpl::ParseLog(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Log);
}
4993
// Handles the TfLite LOGICAL_NOT operator via the shared unary elementwise parser.
void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
}
4998
// Handles the TfLite NEG operator via the shared unary elementwise parser.
void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
}
5003
John Mcloughlin0ec00872023-05-15 17:03:49 +01005004void TfLiteParserImpl::ParsePower(size_t subgraphIndex, size_t operatorIndex)
5005{
5006 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5007
5008 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5009 CHECK_VALID_SIZE(inputs.size(), 2);
5010
5011 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5012 CHECK_VALID_SIZE(outputs.size(), 1);
5013
5014 auto layerName = fmt::format("Power:{}:{}", subgraphIndex, operatorIndex);
5015
5016 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
5017 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
5018 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
5019
5020 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Power, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01005021
5022 if (!layer)
5023 {
5024 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5025 operatorIndex, CHECK_LOCATION().AsString()));
5026 }
John Mcloughlin0ec00872023-05-15 17:03:49 +01005027
5028 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
5029 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
5030 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5031
5032 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5033 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
5034
5035 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5036 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5037}
5038
// Handles the TfLite RSQRT operator via the shared unary elementwise parser.
void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
}
5043
// Handles the TfLite SIN operator via the shared unary elementwise parser.
void TfLiteParserImpl::ParseSin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sin);
}
5048
// Handles the TfLite SQRT operator via the shared unary elementwise parser.
void TfLiteParserImpl::ParseSqrt(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sqrt);
}
5053
Teresa Charlin6963b332023-07-11 11:35:41 +01005054void TfLiteParserImpl::ParseSquare(size_t subgraphIndex, size_t operatorIndex)
5055{
5056 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5057
5058 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5059 CHECK_VALID_SIZE(inputs.size(), 1);
5060
5061 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5062 CHECK_VALID_SIZE(outputs.size(), 1);
5063
5064 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
5065
5066 auto layerName = fmt::format("Square:{}:{}", subgraphIndex, operatorIndex);
5067 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Mul, layerName.c_str());
5068 ARMNN_ASSERT(layer != nullptr);
5069
5070 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 0});
5071 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
5072 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5073
5074 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5075 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[0]});
5076
5077 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5078 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5079}
5080
John Mcloughlin0ec00872023-05-15 17:03:49 +01005081void TfLiteParserImpl::ParseSquaredDifference(size_t subgraphIndex, size_t operatorIndex)
5082{
5083 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5084
5085 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5086 CHECK_VALID_SIZE(inputs.size(), 2);
5087
5088 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5089 CHECK_VALID_SIZE(outputs.size(), 1);
5090
5091 auto layerName = fmt::format("SquaredDifference:{}:{}", subgraphIndex, operatorIndex);
5092
5093 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
5094 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
5095 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
5096
5097 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::SqDiff, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01005098
5099 if (!layer)
5100 {
5101 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5102 operatorIndex, CHECK_LOCATION().AsString()));
5103 }
John Mcloughlin0ec00872023-05-15 17:03:49 +01005104
5105 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
5106 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
5107 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5108
5109 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5110 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
5111
5112 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5113 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5114}
5115
Matthew Sloyaned7fce42021-04-15 20:46:24 +01005116void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
5117{
5118 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5119
5120 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5121 CHECK_VALID_SIZE(inputs.size(), 1);
5122
5123 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5124 CHECK_VALID_SIZE(outputs.size(), 1);
5125
5126 std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
5127 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
5128
5129 ElementwiseUnaryDescriptor desc;
5130 desc.m_Operation = unaryOperation;
5131 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01005132
5133 if (!layer)
5134 {
5135 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5136 operatorIndex, CHECK_LOCATION().AsString()));
5137 }
Matthew Sloyaned7fce42021-04-15 20:46:24 +01005138
Mike Kelly377fb212023-01-10 15:55:28 +00005139 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Sloyaned7fce42021-04-15 20:46:24 +01005140 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5141
5142 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5143 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
5144
5145 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5146 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
5147}
5148
// Handles the TfLite EQUAL operator via the shared comparison parser.
void TfLiteParserImpl::ParseEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Equal);
}
5153
// Handles the TfLite NOT_EQUAL operator via the shared comparison parser.
void TfLiteParserImpl::ParseNotEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::NotEqual);
}
5158
// Handles the TfLite GREATER operator via the shared comparison parser.
void TfLiteParserImpl::ParseGreater(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Greater);
}
5163
// Handles the TfLite GREATER_EQUAL operator via the shared comparison parser.
void TfLiteParserImpl::ParseGreaterOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::GreaterOrEqual);
}
5168
// Handles the TfLite LESS operator via the shared comparison parser.
void TfLiteParserImpl::ParseLess(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Less);
}
5173
// Handles the TfLite LESS_EQUAL operator via the shared comparison parser.
void TfLiteParserImpl::ParseLessOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::LessOrEqual);
}
5178
5179void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorIndex,
5180 ComparisonOperation comparisonOperation)
5181{
5182 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5183
5184 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5185 CHECK_VALID_SIZE(inputs.size(), 2);
5186
5187 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5188 CHECK_VALID_SIZE(outputs.size(), 1);
5189
5190 auto layerName = std::string(GetComparisonOperationAsCString(comparisonOperation)) + ":{}:{}";
5191 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
5192
Mike Kelly377fb212023-01-10 15:55:28 +00005193 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
5194 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves2d0eb862021-07-11 14:10:15 -03005195 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerNameFormatted, "Input 0", "Input 1");
5196
5197 ComparisonDescriptor desc;
5198 desc.m_Operation = comparisonOperation;
5199 IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01005200
5201 if (!layer)
5202 {
5203 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5204 operatorIndex, CHECK_LOCATION().AsString()));
5205 }
Bruno Goncalves2d0eb862021-07-11 14:10:15 -03005206
Mike Kelly377fb212023-01-10 15:55:28 +00005207 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalves2d0eb862021-07-11 14:10:15 -03005208 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5209
5210 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5211 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
5212
5213 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5214 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5215}
5216
Mike Kelly04d82292023-01-19 18:29:40 +00005217armnn::IConnectableLayer* TfLiteParserImpl::AddReshapeLayer(armnn::IConnectableLayer* layer,
5218 unsigned int outputSlot,
5219 std::string reshapeLayerName,
5220 armnn::TensorInfo outputShape)
5221{
5222 ReshapeDescriptor desc;
5223 desc.m_TargetShape = outputShape.GetShape();
5224
5225 IConnectableLayer* reshapeLayer =
5226 m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
5227
5228 auto & prevOutputSlot = layer->GetOutputSlot(outputSlot);
5229 prevOutputSlot.Connect(reshapeLayer->GetInputSlot(0));
5230 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputShape);
5231 return reshapeLayer;
5232}
5233
Kevin May7d96b162021-02-03 17:38:41 +00005234armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
5235 unsigned int outputSlot,
5236 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01005237{
5238 ActivationDescriptor activationDesc;
5239 std::string layerName = prevLayer->GetName();
5240
5241 switch(activationType)
5242 {
5243 case tflite::ActivationFunctionType_NONE:
5244 {
5245 // this is a no-op: return previous layer
5246 return prevLayer;
5247 }
5248 case tflite::ActivationFunctionType_RELU:
5249 {
5250 activationDesc.m_Function = ActivationFunction::ReLu;
5251 layerName += ":RELU";
5252 break;
5253 }
5254 case tflite::ActivationFunctionType_RELU6:
5255 {
5256 activationDesc.m_Function = ActivationFunction::BoundedReLu;
5257 activationDesc.m_A = 6.0f;
5258 activationDesc.m_B = 0.0f;
5259 layerName += ":RELU6";
5260 break;
5261 }
5262 case tflite::ActivationFunctionType_TANH:
5263 {
5264 activationDesc.m_Function = ActivationFunction::TanH;
5265 activationDesc.m_A = 1.0f;
5266 activationDesc.m_B = 1.0f;
5267 layerName += ":TANH";
5268 break;
5269 }
5270
5271 // I only put these here as a reminder what others we could support
5272 case tflite::ActivationFunctionType_RELU_N1_TO_1:
5273 case tflite::ActivationFunctionType_SIGN_BIT:
5274 default:
5275 {
5276 throw ParseException(
Mike Kelly377fb212023-01-10 15:55:28 +00005277 fmt::format("TfLite parser doesn't support fused activation: "
James Ward58dec6b2020-09-11 17:32:44 +01005278 "{}/{} {} ",
5279 activationType,
5280 tflite::EnumNameActivationFunctionType(activationType),
5281 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005282
5283 }
5284 }
5285
5286 IConnectableLayer* activationLayer =
5287 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
5288
5289 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
5290 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
5291 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
5292 return activationLayer;
5293}
5294
Teresa Charlincdbd40b2022-02-25 13:21:55 +00005295armnn::IConnectableLayer* TfLiteParserImpl::AddFusedFloorLayer(armnn::IConnectableLayer* prevLayer,
5296 unsigned int outputSlot)
5297{
Teresa Charlin725728e2022-05-05 13:33:33 +01005298
5299 auto& prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
5300 DataType dataType = prevOutputSlot.GetTensorInfo().GetDataType();
5301
5302 if (dataType == DataType::Signed32)
5303 {
5304 return prevLayer;
5305 }
5306
Teresa Charlincdbd40b2022-02-25 13:21:55 +00005307 std::string layerName = prevLayer->GetName();
5308 IConnectableLayer* floorLayer = m_Network->AddFloorLayer(layerName.c_str());
5309
Teresa Charlincdbd40b2022-02-25 13:21:55 +00005310 prevOutputSlot.Connect(floorLayer->GetInputSlot(0));
5311 floorLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
Teresa Charlin725728e2022-05-05 13:33:33 +01005312
Teresa Charlincdbd40b2022-02-25 13:21:55 +00005313 return floorLayer;
5314}
5315
Mike Kelly0d77ae12022-01-07 17:42:27 +00005316TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char* fileName)
telsoa01c577f2c2018-08-31 09:22:23 +01005317{
5318 if (fileName == nullptr)
5319 {
James Ward58dec6b2020-09-11 17:32:44 +01005320 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01005321 CHECK_LOCATION().AsString()));
5322 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01005323 std::error_code errorCode;
5324 fs::path pathToFile(fileName);
5325 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01005326 {
James Ward58dec6b2020-09-11 17:32:44 +01005327 //fmt::format() could not be used here (format error)
5328 std::stringstream msg;
5329 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
5330 << " " << CHECK_LOCATION().AsString();
James Ward58dec6b2020-09-11 17:32:44 +01005331 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01005332 }
Colm Donelan0dfb2652023-06-22 10:19:17 +01005333 if (!fs::is_regular_file(pathToFile))
5334 {
5335 // Exclude non regular files.
5336 throw InvalidArgumentException(fmt::format("File \"{}\" is not a regular file and cannot be loaded.",
5337 pathToFile.c_str()));
5338 }
5339
telsoa01c577f2c2018-08-31 09:22:23 +01005340 std::ifstream file(fileName, std::ios::binary);
5341 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
5342 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
5343 fileContent.size());
5344}
5345
Mike Kelly0d77ae12022-01-07 17:42:27 +00005346TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t* binaryContent, size_t len)
telsoa01c577f2c2018-08-31 09:22:23 +01005347{
5348 if (binaryContent == nullptr)
5349 {
James Ward58dec6b2020-09-11 17:32:44 +01005350 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01005351 CHECK_LOCATION().AsString()));
5352 }
5353 flatbuffers::Verifier verifier(binaryContent, len);
5354 if (verifier.VerifyBuffer<tflite::Model>() == false)
5355 {
5356 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005357 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
5358 "flatbuffers format. size:{} {}",
5359 len,
5360 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005361 }
5362 return tflite::UnPackModel(binaryContent);
5363}
5364
Mike Kelly0d77ae12022-01-07 17:42:27 +00005365TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00005366 size_t subgraphIndex,
5367 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005368{
5369 CHECK_MODEL(model, subgraphIndex, operatorIndex);
5370
Mike Kelly0d77ae12022-01-07 17:42:27 +00005371 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5372 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005373
5374 size_t inputCount = operatorPtr->inputs.size();
mathad01c21025d2021-04-26 10:09:37 +01005375 TensorRawPtrVector result;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005376 for (size_t i = 0; i < inputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005377 {
mathad01c21025d2021-04-26 10:09:37 +01005378 // If the input location is -1 then assume input is turned off.
5379 if (operatorPtr->inputs[i] == -1)
5380 {
5381 continue;
5382 }
5383 else
5384 {
5385 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
5386 result.push_back(subgraphPtr->tensors[inputId].get());
5387 }
telsoa01c577f2c2018-08-31 09:22:23 +01005388 }
5389 return result;
5390}
5391
Mike Kelly0d77ae12022-01-07 17:42:27 +00005392TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00005393 size_t subgraphIndex,
5394 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005395{
5396 CHECK_MODEL(model, subgraphIndex, operatorIndex);
5397
Mike Kelly0d77ae12022-01-07 17:42:27 +00005398 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5399 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005400
5401 size_t outputCount = operatorPtr->outputs.size();
5402 TensorRawPtrVector result(outputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005403 for (size_t i = 0; i < outputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005404 {
5405 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
5406 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01005407 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01005408 }
5409 return result;
5410}
5411
Mike Kelly0d77ae12022-01-07 17:42:27 +00005412TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00005413 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005414{
5415 CHECK_SUBGRAPH(model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005416 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005417
Derek Lambertiff05cc52019-04-26 13:05:17 +01005418 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01005419 TensorIdRawPtrVector result(inputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005420 for (size_t i = 0; i < inputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005421 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01005422 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01005423 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01005424 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01005425 }
5426 return result;
5427}
5428
Mike Kelly0d77ae12022-01-07 17:42:27 +00005429TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00005430 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005431{
5432 CHECK_SUBGRAPH(model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005433 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005434
Derek Lambertiff05cc52019-04-26 13:05:17 +01005435 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01005436 TensorIdRawPtrVector result(outputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005437 for (size_t i = 0; i < outputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005438 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01005439 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
5440 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01005441 }
5442 return result;
5443}
5444
Kevin May7d96b162021-02-03 17:38:41 +00005445std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
5446 size_t subgraphIndex,
5447 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005448{
5449 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005450 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5451 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005452 return operatorPtr->inputs;
5453}
5454
Kevin May7d96b162021-02-03 17:38:41 +00005455std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
5456 size_t subgraphIndex,
5457 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005458{
5459 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005460 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5461 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005462 return operatorPtr->outputs;
5463}
5464
Kevin May7d96b162021-02-03 17:38:41 +00005465void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
5466 size_t operatorIndex,
5467 IConnectableLayer* layer,
Finn Williamsd4fa5452021-03-01 12:31:41 +00005468 const std::vector<unsigned int>& tensorIndexes,
5469 unsigned int startingSlotIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005470{
5471 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Ryan OSheac229b3f2023-06-27 22:34:54 +01005472
5473 if (!layer)
5474 {
5475 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5476 operatorIndex, CHECK_LOCATION().AsString()));
5477 }
Matthew Sloyan81beae32021-07-13 19:46:11 +01005478
Finn Williamsd4fa5452021-03-01 12:31:41 +00005479 if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
telsoa01c577f2c2018-08-31 09:22:23 +01005480 {
5481 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005482 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
5483 " for subgraph:{} operator index:{} {}",
5484 tensorIndexes.size(),
5485 layer->GetNumInputSlots(),
5486 subgraphIndex,
5487 operatorIndex,
5488 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005489 }
5490
Finn Williamsd4fa5452021-03-01 12:31:41 +00005491 for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
telsoa01c577f2c2018-08-31 09:22:23 +01005492 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00005493 unsigned int tensorIndex = tensorIndexes[index];
5494 armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
telsoa01c577f2c2018-08-31 09:22:23 +01005495 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
5496 }
5497}
5498
Kevin May7d96b162021-02-03 17:38:41 +00005499void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
5500 size_t operatorIndex,
5501 IConnectableLayer* layer,
5502 const std::vector<unsigned int>& tensorIndexes)
telsoa01c577f2c2018-08-31 09:22:23 +01005503{
5504 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Ryan OSheac229b3f2023-06-27 22:34:54 +01005505
5506 if (!layer)
5507 {
5508 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5509 operatorIndex, CHECK_LOCATION().AsString()));
5510 }
5511
telsoa01c577f2c2018-08-31 09:22:23 +01005512 if (tensorIndexes.size() != layer->GetNumOutputSlots())
5513 {
5514 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005515 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
5516 " for subgraph:{} operator index:{} {}",
5517 tensorIndexes.size(),
5518 layer->GetNumOutputSlots(),
5519 subgraphIndex,
5520 operatorIndex,
5521 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005522 }
5523
5524 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
5525 {
5526 unsigned int tensorIndex = tensorIndexes[slotIndex];
5527 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
5528 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
5529 }
5530}
5531
Mike Kelly377fb212023-01-10 15:55:28 +00005532void TfLiteParserImpl::SetupInputLayerTensorInfos(size_t subgraphIndex)
5533{
5534 CHECK_SUBGRAPH(m_Model, subgraphIndex);
5535
5536 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
5537 for (auto const& tensorIdAndPtr : inputs)
5538 {
5539 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
5540 m_TensorInfos.insert({tensorIdAndPtr.first, tensorInfo});
5541 }
5542}
5543
Kevin May7d96b162021-02-03 17:38:41 +00005544void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005545{
5546 CHECK_SUBGRAPH(m_Model, subgraphIndex);
5547
5548 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005549 for (auto const& tensorIdAndPtr : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005550 {
5551 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
5552 IConnectableLayer* layer =
5553 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
5554
5555 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
5556 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
5557
5558 RegisterOutputSlots(subgraphIndex,
5559 VIRTUAL_OPERATOR_ID,
5560 layer,
5561 { static_cast<uint32_t>(tensorIdAndPtr.first) });
5562 }
5563}
5564
Kevin May7d96b162021-02-03 17:38:41 +00005565void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005566{
5567 CHECK_SUBGRAPH(m_Model, subgraphIndex);
5568
5569 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005570 for (auto const& tensorIdAndPtr : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005571 {
5572 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
5573 IConnectableLayer* layer =
5574 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
5575
5576 RegisterInputSlots(subgraphIndex,
5577 VIRTUAL_OPERATOR_ID,
5578 layer,
5579 { static_cast<uint32_t>(tensorIdAndPtr.first) });
5580 }
5581}
5582
// Records TensorInfos for tensors that have registered consumers but no producing
// layer (i.e. tensors that must be constants), so later shape inference can look
// them up in m_TensorInfos.
// NOTE(review): the outer loop walks the connection tables of *all* subgraphs, yet
// the tensors are always taken from the subgraph selected by the 'subgraph'
// parameter — presumably the caller invokes this once per subgraph; verify.
void TfLiteParserImpl::SetupConstantLayerTensorInfos(size_t subgraph)
{
    CHECK_SUBGRAPH(m_Model, subgraph);

    const auto & subgraphPtr = m_Model->subgraphs[subgraph];
    for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // No producer slot but at least one consumer => tensor is not produced
            // by any layer, so it must be constant data.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
                m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
            {
                TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();

                armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);

                m_TensorInfos.insert({tensorIndex, tensorInfo});
            }
        }
    }
}
5604
// Creates a ConstantLayer for every tensor that has consumers but no producing layer.
// Tensors whose buffer holds data become constants backed by that data; tensors listed
// in m_ConstantsToBeCreated become zero-filled placeholders; anything else is an error.
// NOTE(review): as in SetupConstantLayerTensorInfos, the connection tables of all
// subgraphs are scanned while tensors come from the 'subgraph' parameter — presumably
// the caller invokes this once per subgraph; verify.
void TfLiteParserImpl::SetupConstantLayers(size_t subgraph)
{
    CHECK_SUBGRAPH(m_Model, subgraph);

    const auto & subgraphPtr = m_Model->subgraphs[subgraph];
    for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // A tensor with consumers but no producer must be supplied as a constant.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
                m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
            {
                TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();

                if (IsConstTensor(tensorPtr))
                {
                    armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
                    armnn::DataType dataType = tensorInfo.GetDataType();

                    // Tensors flagged for dequantization are materialised as Float32.
                    if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
                        != m_ConstantsToDequantize.end())
                    {
                        dataType = DataType::Float32;
                    }
                    auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo, dataType);

                    std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
                    IConnectableLayer *layer = m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

                    layer->GetOutputSlot(0).SetTensorInfo(tensorAndData.first.GetInfo());
                    RegisterOutputSlots(subgraphIndex,
                                        VIRTUAL_OPERATOR_ID,
                                        layer,
                                        { tensorIndex });
                }
                else if (ShouldConstantTensorBeCreated(tensorIndex))
                {
                    armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
                    armnn::DataType dataType = tensorInfo.GetDataType();

                    if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
                        != m_ConstantsToDequantize.end())
                    {
                        dataType = DataType::Float32;
                    }
                    // Make sure isConstant flag is set.
                    tensorInfo.SetConstant();
                    tensorInfo.SetDataType(dataType);

                    // Buffer has no data: back the constant with a zero-initialised
                    // byte vector of the required size.
                    auto tensorAndData = ConstTensor(tensorInfo, std::vector<uint8_t>(tensorInfo.GetNumBytes()));

                    std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
                    IConnectableLayer* layer = m_Network->AddConstantLayer(tensorAndData, layerName.c_str());

                    layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
                    RegisterOutputSlots(subgraphIndex,
                                        VIRTUAL_OPERATOR_ID,
                                        layer,
                                        {tensorIndex});
                }
                else
                {
                    throw ParseException(
                        fmt::format("Invalid Tensor: Tensor should be constant. {}",
                                    CHECK_LOCATION().AsString()));
                }
            }
        }
    }
}
5675
telsoa01c577f2c2018-08-31 09:22:23 +01005676// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Kevin May7d96b162021-02-03 17:38:41 +00005677TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005678{
5679 CHECK_BUFFER(model, bufferIndex);
5680 return model->buffers[bufferIndex].get();
5681}
5682
Matteo Martincigh747ef822018-12-18 09:26:39 +00005683template<typename T>
Kevin May7d96b162021-02-03 17:38:41 +00005684std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
5685TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
5686 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00005687 armnn::TensorInfo& tensorInfo,
5688 armnn::Optional<armnn::PermutationVector&> permutationVector)
5689{
Matthew Sloyan81beae32021-07-13 19:46:11 +01005690 // Make sure isConstant flag is set.
5691 tensorInfo.SetConstant();
5692
Matteo Martincigh747ef822018-12-18 09:26:39 +00005693 auto constData = CreateConstTensorImpl<T>(bufferPtr,
5694 tensorPtr,
5695 tensorInfo,
5696 permutationVector);
Kevin May7d96b162021-02-03 17:38:41 +00005697 TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
Matteo Martincigh747ef822018-12-18 09:26:39 +00005698 return std::make_pair(constData.first, std::move(storage));
5699}
5700
Mike Kelly5880b912022-01-28 16:18:54 +00005701bool TfLiteParserImpl::ShouldConstantTensorBeCreated(unsigned int tensorIndex)
5702{
5703 // If the TensorIndex appears in the list of ConstantsToBeCreated then return true
5704 return (std::find(m_ConstantsToBeCreated.begin(), m_ConstantsToBeCreated.end(), tensorIndex)
5705 != m_ConstantsToBeCreated.end());
5706}
5707
Finn Williamsd4fa5452021-03-01 12:31:41 +00005708bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
5709{
5710 CHECK_TENSOR_PTR(tensorPtr);
mathad01bf7edb62021-04-20 16:12:45 +01005711 bool isConst = true;
5712
5713 auto buffer = GetBuffer(m_Model, tensorPtr->buffer);
5714 if (buffer->data.size() == 0)
5715 {
5716 isConst = false;
5717 }
5718
5719 return isConst;
Finn Williamsd4fa5452021-03-01 12:31:41 +00005720}
5721
Kevin May7d96b162021-02-03 17:38:41 +00005722std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
Finn Williamsd4fa5452021-03-01 12:31:41 +00005723TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
5724 armnn::TensorInfo& tensorInfo,
5725 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01005726{
5727 CHECK_TENSOR_PTR(tensorPtr);
5728 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5729 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5730
Matthew Sloyan81beae32021-07-13 19:46:11 +01005731 // Make sure isConstant flag is set.
5732 tensorInfo.SetConstant();
5733
telsoa01c577f2c2018-08-31 09:22:23 +01005734 switch (tensorInfo.GetDataType())
5735 {
5736 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00005737 return CreateConstTensorAndStoreData<float>(bufferPtr,
5738 tensorPtr,
5739 tensorInfo,
5740 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00005741 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00005742 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
5743 tensorPtr,
5744 tensorInfo,
5745 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00005746 case armnn::DataType::QSymmS8:
5747 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
5748 tensorPtr,
5749 tensorInfo,
5750 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00005751 case armnn::DataType::QAsymmS8:
5752 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
5753 tensorPtr,
5754 tensorInfo,
5755 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01005756 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00005757 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
5758 tensorPtr,
5759 tensorInfo,
5760 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01005761 default:
5762 {
5763 std::stringstream errString;
5764 errString << "Unexpected datatype when creating const tensor: "
5765 << armnn::GetDataTypeName(tensorInfo.GetDataType())
5766 << " shape:" << tensorInfo.GetShape()
5767 << CHECK_LOCATION().AsString();
5768 throw ParseException(errString.str());
5769 }
5770 }
5771}
5772
Finn Williamsd4fa5452021-03-01 12:31:41 +00005773armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5774 armnn::TensorInfo& tensorInfo)
5775{
5776 CHECK_TENSOR_PTR(tensorPtr);
5777 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5778 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5779
Matthew Sloyan81beae32021-07-13 19:46:11 +01005780 // Make sure isConstant flag is set.
5781 tensorInfo.SetConstant();
5782
Finn Williamsd4fa5452021-03-01 12:31:41 +00005783 return ConstTensor(tensorInfo, bufferPtr->data.data());
5784}
5785
Mike Kelly5880b912022-01-28 16:18:54 +00005786std::pair<armnn::ConstTensor, std::unique_ptr<float[]>>
5787TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5788 armnn::TensorInfo& tensorInfo,
5789 armnn::DataType inputDataType)
5790{
5791 CHECK_TENSOR_PTR(tensorPtr);
5792 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5793 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5794
5795 // Make sure isConstant flag is set.
5796 tensorInfo.SetConstant();
5797
Mike Kelly0506ef02023-01-03 16:29:44 +00005798 if (inputDataType == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
Mike Kelly5880b912022-01-28 16:18:54 +00005799 {
Mike Kelly0506ef02023-01-03 16:29:44 +00005800 try
5801 {
5802 TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5803 std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5804 return std::make_pair(ConstTensor(constTensorInfo, data.get()), std::move(data));
5805 }
Cathal Corbett9c843c32023-01-09 17:51:37 +00005806 catch (InvalidArgumentException&)
Mike Kelly0506ef02023-01-03 16:29:44 +00005807 {
5808 throw ParseException(
5809 fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5810 GetDataTypeName(DataType::Float32),
5811 GetDataTypeName(tensorInfo.GetDataType()),
5812 CHECK_LOCATION().AsString()));
5813 }
Mike Kelly5880b912022-01-28 16:18:54 +00005814 }
5815 else
5816 {
5817 return std::make_pair(ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5818 }
5819}
5820
5821std::pair<armnn::ConstTensor*, std::unique_ptr<float[]>>
5822TfLiteParserImpl::CreateConstTensorPtr(TensorRawPtr tensorPtr, armnn::TensorInfo& inputTensorInfo)
5823{
5824 CHECK_TENSOR_PTR(tensorPtr);
5825 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5826 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5827 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5828
5829 // Make sure isConstant flag is set.
5830 tensorInfo.SetConstant();
5831
5832 if (inputTensorInfo.GetDataType() == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
5833 {
Mike Kelly0506ef02023-01-03 16:29:44 +00005834 try
5835 {
5836 TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5837 std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5838 return std::make_pair(new ConstTensor(constTensorInfo, data.get()), std::move(data));
5839 }
Cathal Corbett9c843c32023-01-09 17:51:37 +00005840 catch (InvalidArgumentException&)
Mike Kelly0506ef02023-01-03 16:29:44 +00005841 {
5842 throw ParseException(
5843 fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5844 GetDataTypeName(DataType::Float32),
5845 GetDataTypeName(tensorInfo.GetDataType()),
5846 CHECK_LOCATION().AsString()));
5847 }
Mike Kelly5880b912022-01-28 16:18:54 +00005848 }
5849 else
5850 {
5851 return std::make_pair(new ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5852 }
5853}
5854
Kevin May7d96b162021-02-03 17:38:41 +00005855BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
5856 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01005857{
5858 CHECK_SUBGRAPH(m_Model, subgraphId);
5859 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005860 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005861 {
5862 if (input.second->name == name)
5863 {
5864 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
Colm Donelan4bc993b2021-11-09 20:39:10 +00005865 auto inputTensorInfo = ToTensorInfo(input.second);
5866 // Input tensors are always treated as constant tensors during network execution.
5867 inputTensorInfo.SetConstant(true);
5868 return std::make_pair(bindingId, inputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01005869 }
5870 }
5871
5872 std::stringstream bindings;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005873 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005874 {
5875 bindings << "'" << input.second->name << "' ";
5876 }
5877
5878 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005879 fmt::format("No input binding found for subgraph:{} and name:{}. "
5880 "Possible inputs are: [{}] {}",
5881 subgraphId,
5882 name,
5883 bindings.str(),
5884 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005885}
5886
Kevin May7d96b162021-02-03 17:38:41 +00005887BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
5888 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01005889{
5890 CHECK_SUBGRAPH(m_Model, subgraphId);
5891 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005892 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005893 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005894 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01005895 if (output.second->name == name)
5896 {
5897 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Mike Kelly377fb212023-01-10 15:55:28 +00005898 std::vector<unsigned int> shape = m_OverriddenOutputShapes.size() > 0 ?
5899 m_OverriddenOutputShapes[i] : AsUnsignedVector(output.second->shape);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005900 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01005901 }
5902 }
5903
5904 std::stringstream bindings;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005905 for (auto const& output : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005906 {
5907 bindings << "'" << output.second->name << "' ";
5908 }
5909
5910 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005911 fmt::format("No output binding found for subgraph:{} and name:{}. "
5912 "Possible outputs are: [{}] {}",
5913 subgraphId,
5914 name,
5915 bindings.str(),
5916 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005917}
5918
Kevin May7d96b162021-02-03 17:38:41 +00005919size_t TfLiteParserImpl::GetSubgraphCount() const
telsoa01c577f2c2018-08-31 09:22:23 +01005920{
5921 return m_Model->subgraphs.size();
5922}
5923
Kevin May7d96b162021-02-03 17:38:41 +00005924std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01005925{
5926 CHECK_SUBGRAPH(m_Model, subgraphId);
5927 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
5928 std::vector<std::string> result;
5929 result.reserve(inputs.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +00005930 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005931 {
5932 result.push_back(input.second->name);
5933 }
5934 return result;
5935}
5936
Kevin May7d96b162021-02-03 17:38:41 +00005937std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01005938{
5939 CHECK_SUBGRAPH(m_Model, subgraphId);
5940 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
5941 std::vector<std::string> result;
5942 result.reserve(outputs.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +00005943 for (auto const& output : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005944 {
5945 result.push_back(output.second->name);
5946 }
5947 return result;
5948}
5949
Matthew Sloyanac001ee2021-02-03 10:43:04 +00005950const std::string TfLiteParserImpl::GetVersion()
5951{
5952 return TFLITE_PARSER_VERSION;
5953}
5954
Mike Kelly0d77ae12022-01-07 17:42:27 +00005955TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]>&& data)
telsoa01c577f2c2018-08-31 09:22:23 +01005956: m_FloatData(std::move(data))
5957, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00005958, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01005959, m_Int32Data(nullptr)
5960{
5961}
5962
Mike Kelly0d77ae12022-01-07 17:42:27 +00005963TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]>&& data)
telsoa01c577f2c2018-08-31 09:22:23 +01005964: m_FloatData(nullptr)
5965, m_Uint8Data(std::move(data))
Keith Davisd305e1a2020-01-22 11:57:54 +00005966, m_Int8Data(nullptr)
5967, m_Int32Data(nullptr)
5968{
5969}
5970
Mike Kelly0d77ae12022-01-07 17:42:27 +00005971TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]>&& data)
Keith Davisd305e1a2020-01-22 11:57:54 +00005972: m_FloatData(nullptr)
5973, m_Uint8Data(nullptr)
5974, m_Int8Data(std::move(data))
telsoa01c577f2c2018-08-31 09:22:23 +01005975, m_Int32Data(nullptr)
5976{
5977}
5978
Mike Kelly0d77ae12022-01-07 17:42:27 +00005979TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]>&& data)
telsoa01c577f2c2018-08-31 09:22:23 +01005980: m_FloatData(nullptr)
5981, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00005982, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01005983, m_Int32Data(std::move(data))
5984{
5985}
5986
5987} // armnnTfLiteParser