blob: 301989ebb7d7f20647c889e84198963c77754249 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kelly04d82292023-01-19 18:29:40 +00002// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfLiteParser/Version.hpp"
Mike Kelly5880b912022-01-28 16:18:54 +00009#include "armnn/LstmParams.hpp"
Matthew Sloyanac001ee2021-02-03 10:43:04 +000010
Sadik Armagand109a4d2020-07-28 10:42:13 +010011#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +000012#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010013#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000014#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010015#include <armnn/Tensor.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000016#include <armnnUtils/TensorUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010017#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010018#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000019#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010020#include <armnn/utility/NumericCast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010021
22// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000023#include <armnnUtils/Permute.hpp>
Rob Hughes9542f902021-07-14 09:48:54 +010024#include <armnnUtils/Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000025
Sadik Armagan479045b2018-10-01 11:51:37 +010026#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010027#include <VerificationHelpers.hpp>
28
29// The generated code based on the Tf Lite schema:
30#include <schema_generated.h>
31
Matteo Martincighe011d202019-11-28 11:35:47 +000032#include <flatbuffers/flexbuffers.h>
33
James Ward58dec6b2020-09-11 17:32:44 +010034#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010035
telsoa01c577f2c2018-08-31 09:22:23 +010036#include <algorithm>
Matthew Sloyanac001ee2021-02-03 10:43:04 +000037#include <iostream>
telsoa01c577f2c2018-08-31 09:22:23 +010038#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010039#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000040
// Throws armnn::ParseException carrying the streamed message followed by
// ": <file:line>" of the call site. The braces keep the helper stream local.
#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
    { \
        std::stringstream armnnParseExceptionStream; \
        armnnParseExceptionStream << msg << ": " << CHECK_LOCATION().AsString(); \
        throw armnn::ParseException(armnnParseExceptionStream.str()); \
    }
telsoa01c577f2c2018-08-31 09:22:23 +010047
48using namespace armnn;
49using armnn::CheckLocation;
50namespace armnnTfLiteParser
51{
Kevin May7d96b162021-02-03 17:38:41 +000052
53ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
54 pTfLiteParserImpl(new TfLiteParserImpl(options)) {}
55
56ITfLiteParser::~ITfLiteParser() = default;
57
58ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
59{
60 return new ITfLiteParser(options);
61}
62
63ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
64{
65 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
66}
67
68void ITfLiteParser::Destroy(ITfLiteParser* parser)
69{
70 delete parser;
71}
72
73armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
74{
75 return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
76}
77
Mike Kelly0d77ae12022-01-07 17:42:27 +000078armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
Kevin May7d96b162021-02-03 17:38:41 +000079{
80 return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
81}
82
83BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
84 const std::string& name) const
85{
86 return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
87}
88
89BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
90 const std::string& name) const
91{
92 return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
93}
94
95size_t ITfLiteParser::GetSubgraphCount() const
96{
97 return pTfLiteParserImpl->GetSubgraphCount();
98}
99
100std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
101{
102 return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
103}
104
105std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
106{
107 return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
108}
109
telsoa01c577f2c2018-08-31 09:22:23 +0100110namespace
111{
jimfly01c25411c2018-11-14 17:47:22 +0000112
telsoa01c577f2c2018-08-31 09:22:23 +0100113const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
114
Mike Kelly0d77ae12022-01-07 17:42:27 +0000115void CheckSubgraph(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100116 size_t subgraphIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000117 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100118{
119 if (model.get() == nullptr)
120 {
121 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100122 fmt::format("{} was called with invalid (null) model. "
123 "Possible reason is that the model is not yet loaded and Unpack(ed). "
124 "subgraph:{} at {}",
125 location.m_Function,
126 subgraphIndex,
127 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100128 }
129 else if (subgraphIndex >= model->subgraphs.size())
130 {
131 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100132 fmt::format("{} was called with an invalid subgraph index. "
133 "subgraph:{} at {}",
134 location.m_Function,
135 subgraphIndex,
136 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100137 }
138}
139
140#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
141 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
142
Mike Kelly0d77ae12022-01-07 17:42:27 +0000143void CheckModel(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100144 size_t subgraphIndex,
145 size_t operatorIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000146 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100147{
148 if (model.get() == nullptr)
149 {
150 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100151 fmt::format("{} was called with invalid (null) model. "
152 "Possible reason is that the model is not yet loaded and Unpack(ed). "
153 "subgraph:{} operator:{} at {}",
154 location.m_Function,
155 subgraphIndex,
156 operatorIndex,
157 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100158 }
159 else if (subgraphIndex >= model->subgraphs.size())
160 {
161 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100162 fmt::format("{} was called with an invalid subgraph index. "
163 "subgraph:{} operator:{} at {}",
164 location.m_Function,
165 subgraphIndex,
166 operatorIndex,
167 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100168 }
169 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
170 operatorIndex != VIRTUAL_OPERATOR_ID)
171 {
172 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100173 fmt::format("{} was called with an invalid operator index. "
174 "subgraph:{} operator:{} at {}",
175 location.m_Function,
176 subgraphIndex,
177 operatorIndex,
178 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100179 }
180}
181
182#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
183 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
184
Mike Kelly0d77ae12022-01-07 17:42:27 +0000185void CheckTensor(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100186 size_t subgraphIndex,
187 size_t tensorIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000188 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100189{
telsoa01c577f2c2018-08-31 09:22:23 +0100190 // the tensor index is the only one to check here
191 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
192 {
193 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100194 fmt::format("{} was called with an invalid tensor index. "
195 "subgraph:{} tensor:{} at {}",
196 location.m_Function,
197 subgraphIndex,
198 tensorIndex,
199 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100200 }
201}
202
203#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
204 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
205
Kevin May7d96b162021-02-03 17:38:41 +0000206void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000207 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100208{
209 if (rawPtr == nullptr)
210 {
211 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100212 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100213 }
214}
215
216#define CHECK_TENSOR_PTR(TENSOR_PTR) \
217 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
218
Mike Kelly0d77ae12022-01-07 17:42:27 +0000219void CheckBuffer(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100220 size_t bufferIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000221 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100222{
223 if (model.get() == nullptr)
224 {
225 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100226 fmt::format("{} was called with invalid (null) model. "
227 "Possible reason is that the model is not yet loaded and Unpack(ed). "
228 "buffer:{} at {}",
229 location.m_Function,
230 bufferIndex,
231 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100232 }
233 else if (bufferIndex >= model->buffers.size())
234 {
235 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100236 fmt::format("{} was called with an invalid buffer index. "
237 "buffer index:{} at {}",
238 location.m_Function,
239 bufferIndex,
240 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100241 }
242 else if (model->buffers[bufferIndex].get() == nullptr)
243 {
244 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100245 fmt::format("The buffer #{} is null. {}",
246 bufferIndex,
247 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100248 }
249}
250
251#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
252 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
253
Kevin May7d96b162021-02-03 17:38:41 +0000254void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000255 const armnn::TensorInfo& tensorInfo,
telsoa01c577f2c2018-08-31 09:22:23 +0100256 uint32_t bufferId,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000257 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100258{
259 if (bufferPtr == nullptr)
260 {
261 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100262 fmt::format("BufferPtr is null for buffer:{}. {}",
263 bufferId,
264 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100265 }
266 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
267 tensorInfo.GetNumBytes() > bufferPtr->data.size())
268 {
269 std::stringstream ss;
270 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
271 << "For tensor: " << tensorInfo.GetShape()
272 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
273 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
274 throw ParseException(ss.str());
275 }
276}
277
Mike Kelly0d77ae12022-01-07 17:42:27 +0000278
279tflite::BuiltinOperator GetOpCode(const TfLiteParserImpl::ModelPtr& model, size_t subgraphIndex, size_t operatorIndex)
280{
281 const auto& operatorPtr = model->subgraphs[subgraphIndex]->operators[operatorIndex];
282 auto opcodeIndex = operatorPtr->opcode_index;
283
284// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
285#if defined(ARMNN_POST_TFLITE_2_3)
286 auto opcode = std::max(model->operator_codes[opcodeIndex]->builtin_code,
287 static_cast<tflite::BuiltinOperator>(model->operator_codes[opcodeIndex]->deprecated_builtin_code));
288#else
289 auto opcode = model->operator_codes[opcodeIndex]->builtin_code;
290#endif
291 return opcode;
292}
293
294std::vector<unsigned int> GetUIntBuffer(armnn::TensorInfo info,
295 const TfLiteParserImpl::ModelPtr& model,
296 size_t bufferIndex)
297{
298 TfLiteParserImpl::BufferRawPtr bufferPtr = TfLiteParserImpl::GetBuffer(model, bufferIndex);
299 std::vector<unsigned int> buffer(info.GetNumElements());
300
301 if (info.GetDataType() == DataType::Signed32)
302 {
303 ::memcpy(buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
304 }
305 else if (info.GetDataType() == DataType::Signed64)
306 {
307 std::vector<uint64_t> uint64Buffer(info.GetNumElements());
308 ::memcpy(uint64Buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
309 buffer.assign(std::begin(uint64Buffer), std::end(uint64Buffer));
310 }
Mike Kelly0506ef02023-01-03 16:29:44 +0000311 else
312 {
313 CheckLocation location = CHECK_LOCATION();
314 throw ParseException(
315 fmt::format("Unsupported data type for uint buffer {}, only Signed 32 or Signed 64 are supported. {}",
316 GetDataTypeName(info.GetDataType()),
317 location.AsString()));
318 }
Mike Kelly0d77ae12022-01-07 17:42:27 +0000319 return buffer;
320}
321
// Validates BUFFER_PTR against TENSOR_INFO, capturing the call site's location.
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
324
325bool IsActivationSupported(tflite::ActivationFunctionType activationType)
326{
327 switch(activationType)
328 {
329 case tflite::ActivationFunctionType_NONE:
330 case tflite::ActivationFunctionType_RELU:
331 case tflite::ActivationFunctionType_RELU6:
332 case tflite::ActivationFunctionType_TANH:
333 {
334 return true;
335 }
336 default:
337 {
338 return false;
339 }
340 }
341}
342
// Throws ParseException when OPTION carries a fused activation the parser
// cannot handle. do/while(false) makes the macro a single statement.
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                fmt::format("TfLite parser doesn't support fused activation: " \
                            "{}/{} in {} subgraph:{} operator:{} at {}", \
                            OPTION->fused_activation_function, \
                            tflite::EnumNameActivationFunctionType(\
                                OPTION->fused_activation_function), \
                            __func__, \
                            SUBGRAPH_INDEX, \
                            OPERATOR_INDEX, \
                            CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
359
360
Mike Kelly0d77ae12022-01-07 17:42:27 +0000361std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t>& in)
telsoa01c577f2c2018-08-31 09:22:23 +0100362{
363 std::vector<unsigned int> result;
364 result.reserve(in.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +0000365 for (auto& i : in)
telsoa01c577f2c2018-08-31 09:22:23 +0100366 {
mathad01c21025d2021-04-26 10:09:37 +0100367 // If the location of the input data is -1 then the input should be ignored.
368 if (i == -1)
369 {
370 continue;
371 }
telsoa01c577f2c2018-08-31 09:22:23 +0100372 result.push_back(CHECKED_NON_NEGATIVE(i));
373 }
374 return result;
375}
376
// TfLite encodes an omitted optional operand as a negative tensor index;
// a non-negative index means the operand is present.
bool IsOptionalOperandPresent(int input)
{
    return !(input < 0);
}
381
telsoa01c577f2c2018-08-31 09:22:23 +0100382void CalcPadding(uint32_t inputSize,
383 uint32_t filterSize,
384 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100385 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100386 uint32_t& paddingFront,
387 uint32_t& paddingBack,
388 tflite::Padding padding)
389{
390 paddingFront = 0;
391 paddingBack = 0;
392 if (padding == tflite::Padding_SAME)
393 {
394 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100395 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
396 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100397 if (temp > inputSize)
398 {
399 paddingFront = (temp - inputSize) / 2;
400 paddingBack = (temp - inputSize) - paddingFront;
401 }
402 }
403}
404
Teresa Charlin024ef0b2023-04-26 11:19:03 +0100405// Function that calculates explicit padding when the output shape is known.
406// At the moment the output is only given as an input parameter in Transpose Convolution,
407// not in Convolution and Depthwise Convolution
408void CalcPadding(uint32_t inputSize,
409 uint32_t filterSize,
410 uint32_t stride,
411 uint32_t dilation,
412 uint32_t& paddingFront,
413 uint32_t& paddingBack,
414 tflite::Padding padding,
415 uint32_t outputSize)
416{
417 IgnoreUnused(dilation);
418 paddingFront = 0;
419 paddingBack = 0;
420 if (padding == tflite::Padding_SAME)
421 {
422 uint32_t totalPadding = (inputSize - 1) * stride + filterSize - outputSize;
423 paddingFront = totalPadding / 2;
424 paddingBack = totalPadding - paddingFront;
425 }
426}
427
Kevin May7d96b162021-02-03 17:38:41 +0000428armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Finn Williamsb49ed182021-06-29 15:50:08 +0100429 const std::vector<unsigned int>& shape,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100430 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100431{
432 armnn::DataType type;
433 CHECK_TENSOR_PTR(tensorPtr);
434
435 switch (tensorPtr->type)
436 {
437 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000438 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100439 break;
440 case tflite::TensorType_FLOAT32:
441 type = armnn::DataType::Float32;
442 break;
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100443 case tflite::TensorType_FLOAT16:
444 type = armnn::DataType::Float16;
445 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000446 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000447 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000448 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000449 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000450 type = armnn::DataType::QAsymmS8;
451 }
452 else
453 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000454 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000455 type = armnn::DataType::QSymmS8;
456 }
Finn Williamsed66d142019-12-06 09:55:55 +0000457 break;
458 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000459 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000460 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100461 case tflite::TensorType_INT32:
462 type = armnn::DataType::Signed32;
463 break;
Inki Daed4619e22020-09-10 15:33:54 +0900464 case tflite::TensorType_INT64:
465 type = armnn::DataType::Signed64;
466 break;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100467 case tflite::TensorType_BOOL:
468 type = armnn::DataType::Boolean;
469 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100470 default:
471 {
472 CheckLocation location = CHECK_LOCATION();
473 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100474 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
475 tensorPtr->type,
476 tflite::EnumNameTensorType(tensorPtr->type),
477 tensorPtr->name,
478 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100479 }
480 }
Finn Williamsb49ed182021-06-29 15:50:08 +0100481 TensorShape tensorShape;
482
483 std::vector<unsigned int> safeShape = shape;
484 if (shape.size() == 0)
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100485 {
486 safeShape.push_back(1);
Finn Williamsb49ed182021-06-29 15:50:08 +0100487 }
488
489 if (!outputTensor)
490 {
491 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()), safeShape.data());
492 }
493 else
494 {
Rob Hughesd812a312021-08-06 13:10:53 +0100495 size_t shapeSignatureSize = tensorPtr->shape_signature.size();
Finn Williamsb49ed182021-06-29 15:50:08 +0100496
497 // If a shape signature exists we will use that to infer dynamic tensors
498 if (shapeSignatureSize != 0)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100499 {
Finn Williamsb49ed182021-06-29 15:50:08 +0100500 // If the shape is incompatible with the shape signature override the shape
501 if (shapeSignatureSize != shape.size())
502 {
503 safeShape = {};
504
505 for (unsigned int i = 0; i < shapeSignatureSize; ++i)
506 {
507 unsigned int dim = tensorPtr->shape_signature[i] > -1 ?
508 static_cast<unsigned int>(tensorPtr->shape_signature[i]) : 0;
509 safeShape.push_back(dim);
510 }
511 }
512
Rob Hughesd812a312021-08-06 13:10:53 +0100513 std::unique_ptr<bool[]> dimMask = std::make_unique<bool[]>(tensorPtr->shape_signature.size());
Mike Kelly04d82292023-01-19 18:29:40 +0000514 bool batchOnly = true;
Finn Williamsb49ed182021-06-29 15:50:08 +0100515 for (unsigned int i = 0; i < tensorPtr->shape_signature.size(); ++i)
516 {
Mike Kelly04d82292023-01-19 18:29:40 +0000517 dimMask[i] = tensorPtr->shape_signature[i] != -1;
518
519 if (i > 0 && !dimMask[i])
520 {
521 batchOnly = false;
522 }
523 }
524 if (batchOnly)
525 {
526 dimMask[0] = true;
Finn Williamsb49ed182021-06-29 15:50:08 +0100527 }
Rob Hughesd812a312021-08-06 13:10:53 +0100528 tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask.get());
Finn Williamsb49ed182021-06-29 15:50:08 +0100529 }
530 // If there is no shape signature treat the tensor as dynamic if the shape has a size of zero
531 else if (shape.size() == 0)
532 {
533 tensorShape = TensorShape(1, false);
534 }
535 else
536 {
537 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(shape.size()), shape.data());
Sadik Armagand109a4d2020-07-28 10:42:13 +0100538 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100539 }
540
Teresa Charlinacb3ec52023-04-03 19:57:00 +0100541 float quantizationScale = 1.0f;
Keith Davisd305e1a2020-01-22 11:57:54 +0000542 int32_t quantizationOffset = 0;
543
544 if (tensorPtr->quantization.get())
545 {
546 if (tensorPtr->quantization->scale.size() <= 1)
547 {
548 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
549 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
550
551 if (tensorPtr->quantization->scale.size() == 1)
552 {
553 quantizationScale = tensorPtr->quantization->scale[0];
554 }
555 if (tensorPtr->quantization->zero_point.size() == 1)
556 {
557 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000558 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100559 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000560 }
561
Sadik Armagand109a4d2020-07-28 10:42:13 +0100562 armnn::TensorInfo result(tensorShape,
563 type,
564 quantizationScale,
565 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000566 return result;
567 }
568 else
569 {
570 std::vector<float> quantizationScales;
571 std::vector<int32_t> quantizationOffsets;
572
573 // Scale
574 std::copy(tensorPtr->quantization->scale.begin(),
575 tensorPtr->quantization->scale.end(),
576 std::back_inserter(quantizationScales));
577
Keith Davis0c2eeac2020-02-11 16:51:50 +0000578 // QSymmS8 Per-axis
Sadik Armagand109a4d2020-07-28 10:42:13 +0100579 armnn::TensorInfo result(tensorShape,
580 type,
581 quantizationScales,
Jan Eilers7612bd62021-04-06 17:29:03 +0100582 armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
Keith Davisd305e1a2020-01-22 11:57:54 +0000583 return result;
584 }
585 }
586 else
587 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100588 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000589 type,
590 quantizationScale,
591 quantizationOffset);
592 return result;
593 }
telsoa01c577f2c2018-08-31 09:22:23 +0100594}
595
Kevin May7d96b162021-02-03 17:38:41 +0000596armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Mike Kelly377fb212023-01-10 15:55:28 +0000597 const bool outputTensor = false)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100598{
Mike Kelly0d77ae12022-01-07 17:42:27 +0000599 auto const& dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100600 return ToTensorInfo(tensorPtr, dimensions, outputTensor);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100601}
602
telsoa01c577f2c2018-08-31 09:22:23 +0100603template<typename T>
604std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
Kevin May7d96b162021-02-03 17:38:41 +0000605CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
606 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000607 armnn::TensorInfo& tensorInfo,
608 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100609{
Jan Eilers8eb25602020-03-09 12:13:48 +0000610 IgnoreUnused(tensorPtr);
Ryan OSheac229b3f2023-06-27 22:34:54 +0100611
612 if (!tensorPtr)
613 {
614 throw armnn::ParseException(fmt::format("Tensor pointer is null {}", CHECK_LOCATION().AsString()));
615 }
616
617 if (!bufferPtr)
618 {
619 throw armnn::ParseException(fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
620 }
telsoa01c577f2c2018-08-31 09:22:23 +0100621
622 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000623
624 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
625 {
626 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000627 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
628 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000629 }
630 else
631 {
632 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
633 }
634
Matthew Sloyan81beae32021-07-13 19:46:11 +0100635 // Make sure isConstant flag is set.
636 tensorInfo.SetConstant();
637
telsoa01c577f2c2018-08-31 09:22:23 +0100638 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
639}
640
telsoa01c577f2c2018-08-31 09:22:23 +0100641armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
642{
643 // generate the binding id by shifting the tensor id by 8 bit
644 // and add the subgraph id, which allows 256 subgraphs
645 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
646}
647
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000648bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
649{
650 const unsigned int actualSize = actual.GetNumDimensions();
651 if (actualSize != expected.size())
652 {
653 return false;
654 }
655
656 for (unsigned int i = 0u; i < actualSize; i++)
657 {
658 if (expected[i] < 0 ||
659 actual[i] != static_cast<unsigned int>(expected[i]))
660 {
661 return false;
662 }
663 }
664
665 return true;
666}
667
Cathal Corbett2b922e22022-09-23 15:49:24 +0100668bool CheckShape(const armnn::TensorShape& actual, const armnn::TensorShape& expected)
669{
670 std::vector<int32_t> expectedVec;
671 for (uint32_t i = 0; i < expected.GetNumDimensions(); i++)
672 {
673 expectedVec.push_back(expected[i]);
674 }
675 return CheckShape(actual, expectedVec);
676}
677
James Conroy05102392020-06-24 15:39:55 +0100678void CheckMatchingQuantization(const TensorInfo& first,
679 const TensorInfo& second,
680 const std::string& descName,
681 std::string const& firstName,
682 std::string const& secondName)
683{
684 if (!first.IsQuantized() ||
685 !second.IsQuantized())
686 {
687 // Not a quantized type, ignore the validation
688 return;
689 }
690
691 DataType firstDataType = first.GetDataType();
692 DataType secondDataType = second.GetDataType();
693
694 if (firstDataType != secondDataType)
695 {
696 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
697 " must be of the same quantized type, " +
698 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
699 secondName + " is " + GetDataTypeName(secondDataType));
700 }
701
702 if (!first.IsTypeSpaceMatch(second))
703 {
704 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
705 " must have the same quantization space, " +
706 firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
707 " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
708 secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
709 " and scale " + std::to_string(second.GetQuantizationScale()));
710 }
711}
712
Mike Kelly377fb212023-01-10 15:55:28 +0000713bool IsDynamic(TfLiteParserImpl::TensorRawPtr tensorPtr)
714{
715 auto shape = tensorPtr->shape;
716
717 if (shape.empty())
718 {
719 return true;
720 }
721 auto shapeSig = tensorPtr->shape_signature;
722
723 if (shapeSig.empty())
724 {
725 return false;
726 }
727
728 for (unsigned int i = 0; i < shapeSig.size() ; ++i)
729 {
730 if (shapeSig[i] == -1)
731 {
732 return true;
733 }
734 }
735 return false;
736}
737
telsoa01c577f2c2018-08-31 09:22:23 +0100738} // <anonymous>
739
Kevin May7d96b162021-02-03 17:38:41 +0000740TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100741: m_Options(options)
742, m_Network(nullptr, nullptr)
Kevin May7d96b162021-02-03 17:38:41 +0000743, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
telsoa01c577f2c2018-08-31 09:22:23 +0100744{
745 // register supported operators
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100746 m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
Kevin May7d96b162021-02-03 17:38:41 +0000747 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100748 m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
749 m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
Kevin May7d96b162021-02-03 17:38:41 +0000750 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
751 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
Samuel Yapfd3ba5a2022-08-24 17:04:34 +0100752 m_ParserFunctions[tflite::BuiltinOperator_BATCH_MATMUL] = &TfLiteParserImpl::ParseBatchMatMul;
Teresa Charlin93f0ad02023-03-23 15:28:02 +0000753 m_ParserFunctions[tflite::BuiltinOperator_CEIL] = &TfLiteParserImpl::ParseCeil;
mathad01b392e982021-04-07 12:07:30 +0100754 m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
Kevin May7d96b162021-02-03 17:38:41 +0000755 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
756 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
Matthew Sloyan4d217c02021-10-07 11:48:58 +0100757 // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
Cathal Corbett80b4ef02022-05-25 11:21:11 +0100758 #if defined(ARMNN_POST_TFLITE_2_4)
Matthew Sloyaneb5f8102021-10-05 17:31:42 +0100759 m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
Matthew Sloyan4d217c02021-10-07 11:48:58 +0100760 #endif
Kevin May7d96b162021-02-03 17:38:41 +0000761 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
762 m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
763 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
764 m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100765 m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
Kevin May7d96b162021-02-03 17:38:41 +0000766 m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300767 m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000768 m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
Teresa Charlin3ab85482021-06-08 16:59:29 +0100769 m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
Teresa Charlincdbd40b2022-02-25 13:21:55 +0000770 m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV] = &TfLiteParserImpl::ParseFloorDiv;
Kevin May7d96b162021-02-03 17:38:41 +0000771 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
772 m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
Teresa Charlin91a53ea2022-04-25 15:47:29 +0100773 m_ParserFunctions[tflite::BuiltinOperator_GATHER_ND] = &TfLiteParserImpl::ParseGatherNd;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300774 m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
775 m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000776 m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
777 m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300778 m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
779 m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
Mike Kelly31dce2b2021-09-01 21:22:37 +0100780 m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
781 = &TfLiteParserImpl::ParseLocalResponseNormalization;
Teresa Charlin28aa6692022-07-12 11:18:44 +0100782 m_ParserFunctions[tflite::BuiltinOperator_LOG] = &TfLiteParserImpl::ParseLog;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100783 m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
Kevin May7d96b162021-02-03 17:38:41 +0000784 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
Teresa Charlinfd33a692022-06-29 15:35:57 +0100785 m_ParserFunctions[tflite::BuiltinOperator_LOG_SOFTMAX] = &TfLiteParserImpl::ParseLogSoftmax;
Kevin May7d96b162021-02-03 17:38:41 +0000786 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
787 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
788 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
789 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
790 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +0100791 m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
Kevin May7d96b162021-02-03 17:38:41 +0000792 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
793 m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300794 m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000795 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
796 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
Mike Kelly0d77ae12022-01-07 17:42:27 +0000797 m_ParserFunctions[tflite::BuiltinOperator_PADV2] = &TfLiteParserImpl::ParsePad;
John Mcloughlin0ec00872023-05-15 17:03:49 +0100798 m_ParserFunctions[tflite::BuiltinOperator_POW] = &TfLiteParserImpl::ParsePower;
Narumol Prangnawaratbfaee6b2021-05-24 18:50:24 +0100799 m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
Kevin May7d96b162021-02-03 17:38:41 +0000800 m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
801 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
802 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
Sadik Armagana2747482021-02-09 10:28:54 +0000803 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
804 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
Teresa Charlin4e3e8312021-08-05 12:34:37 +0100805 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
Kevin May7d96b162021-02-03 17:38:41 +0000806 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
807 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
808 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
Tianle Chenge5a30ff2023-07-03 11:24:12 +0100809 m_ParserFunctions[tflite::BuiltinOperator_REVERSE_V2] = &TfLiteParserImpl::ParseReverseV2;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100810 m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
Teresa Charlinf0fce5b2022-05-04 17:24:43 +0100811 m_ParserFunctions[tflite::BuiltinOperator_SQRT] = &TfLiteParserImpl::ParseSqrt;
Keith Davis0176fd82021-06-01 17:36:32 +0100812 m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
Teresa Charlin28aa6692022-07-12 11:18:44 +0100813 m_ParserFunctions[tflite::BuiltinOperator_SIN] = &TfLiteParserImpl::ParseSin;
Kevin May7d96b162021-02-03 17:38:41 +0000814 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
815 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
816 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
Teresa Charlin2a764ad2023-02-24 18:17:31 +0000817 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_DEPTH] = &TfLiteParserImpl::ParseSpaceToDepth;
Kevin May7d96b162021-02-03 17:38:41 +0000818 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
819 m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
820 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
Teresa Charlin6963b332023-07-11 11:35:41 +0100821 m_ParserFunctions[tflite::BuiltinOperator_SQUARE] = &TfLiteParserImpl::ParseSquare;
John Mcloughlin0ec00872023-05-15 17:03:49 +0100822 m_ParserFunctions[tflite::BuiltinOperator_SQUARED_DIFFERENCE] = &TfLiteParserImpl::ParseSquaredDifference;
Kevin May7d96b162021-02-03 17:38:41 +0000823 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
824 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
825 m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
826 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
Teresa Charlin777008b2023-07-26 10:07:55 +0100827 m_ParserFunctions[tflite::BuiltinOperator_TILE] = &TfLiteParserImpl::ParseTile;
Kevin May7d96b162021-02-03 17:38:41 +0000828 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
829 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
Mike Kelly5880b912022-01-28 16:18:54 +0000830 m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
831 = &TfLiteParserImpl::ParseUnidirectionalSequenceLSTM;
Kevin May7d96b162021-02-03 17:38:41 +0000832 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100833
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100834 // register supported custom operators
Kevin May7d96b162021-02-03 17:38:41 +0000835 m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
telsoa01c577f2c2018-08-31 09:22:23 +0100836}
837
Mike Kelly377fb212023-01-10 15:55:28 +0000838armnn::TensorInfo TfLiteParserImpl::InputTensorInfo(size_t subgraphIndex,
839 size_t operatorIndex,
840 int input)
841{
842 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
843 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
844
845 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[input]);
846 auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);
847
848 if (search != m_TensorInfos.end())
849 {
850 return m_TensorInfos[inputId];
851 }
852 else
853 {
854 auto tensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
855 m_TensorInfos.insert({ inputId, tensorInfo });
856 return tensorInfo;
857 }
858}
859
860armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromInputs(size_t subgraphIndex,
861 size_t operatorIndex,
862 armnn::IConnectableLayer* layer,
863 int output,
864 std::vector<int> inputs)
865{
866 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
867 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
868
869 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
870
871 auto outputSearch = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(outputId);
872
873 if (outputSearch != m_TensorInfos.end())
874 {
875 return m_TensorInfos[outputId];
876 }
877
878 const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
879 TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);
880
881 if (IsDynamic(outputTensorPtr))
882 {
883 if (inputs.empty())
884 {
885 for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
886 {
887 inputs.emplace_back(i);
888 }
889 }
890 auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
891 std::vector<armnn::TensorShape> inputShapes;
892
893 for (unsigned int i = 0; i < inputs.size(); ++i)
894 {
895 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[inputs[i]]);
896 auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);
897
898 if (search != m_TensorInfos.end())
899 {
900 auto &inputTensorInfo = m_TensorInfos[inputId];
901 inputShapes.push_back(inputTensorInfo.GetShape());
902 }
903 else
904 {
Mike Kelly377fb212023-01-10 15:55:28 +0000905 auto inputTensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
906 m_TensorInfos.insert({ inputId, inputTensorInfo});
907 inputShapes.push_back(inputTensorInfo.GetShape());
908 }
909 }
910 const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
911 tensor.SetShape(outputShape);
912 }
913 m_TensorInfos.insert({ outputId, tensor});
914 return tensor;
915}
916
917armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromShapes(size_t subgraphIndex,
918 size_t operatorIndex,
919 armnn::IConnectableLayer* layer,
920 int output,
921 std::vector<armnn::TensorShape> inputShapes)
922{
923 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
924 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
925
926 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
927 const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
928 TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);
929
930 if (IsDynamic(outputTensorPtr))
931 {
932 const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
933 tensor.SetShape(outputShape);
934 }
935 m_TensorInfos.insert({ outputId, tensor});
936 return tensor;
937}
938
Kevin May7d96b162021-02-03 17:38:41 +0000939void TfLiteParserImpl::ResetParser()
telsoa01c577f2c2018-08-31 09:22:23 +0100940{
941 m_Network = armnn::INetworkPtr(nullptr, nullptr);
942 m_Model = nullptr;
943 m_SubgraphConnections.clear();
Mike Kelly377fb212023-01-10 15:55:28 +0000944 m_OverriddenOutputShapes.clear();
Mike Kelly5880b912022-01-28 16:18:54 +0000945 m_ConstantsToDequantize.clear();
946 m_ConstantsToBeCreated.clear();
Mike Kelly377fb212023-01-10 15:55:28 +0000947 m_TensorInfos.clear();
telsoa01c577f2c2018-08-31 09:22:23 +0100948}
949
// Loads a TfLite flatbuffer model from a file on disk and builds an ArmNN
// network from it. Resets any state left over from a previous parse first.
// Throws (via LoadModelFromFile / CreateNetworkFromModel) on failure.
INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}
956
// Loads a TfLite flatbuffer model from an in-memory buffer and builds an
// ArmNN network from it. Resets any state left over from a previous parse.
// Throws (via LoadModelFromBinary / CreateNetworkFromModel) on failure.
INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
963
Finn Williamsb49ed182021-06-29 15:50:08 +0100964
// Builds an ArmNN network from an already-unpacked flatbuffer model object,
// taking ownership of it. Resets any state left over from a previous parse.
armnn::INetworkPtr TfLiteParserImpl::LoadModel(std::unique_ptr<tflite::ModelT> model)
{
    ResetParser();
    m_Model = std::move(model);

    return CreateNetworkFromModel();
}
972
// Core of the parser: walks m_Model and builds the ArmNN INetwork.
//
// Flow:
//   1. Translate parser options into NetworkOptions (shape inference mode,
//      expanded-dims handling) and create the empty network.
//   2. For the single supported subgraph, pre-compute input/constant tensor
//      infos, then dispatch each operator to its registered parse function.
//   3. Wire up the recorded producer->consumer slot connections at the end,
//      once all layers exist.
//
// Returns the built network (ownership transferred to the caller).
// Throws ParseException on a null/multi-subgraph model, an out-of-range
// opcode, or any error raised by an operator parse function.
INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
{

    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
    if (m_Options)
    {
        if (m_Options.value().m_InferAndValidate)
        {
            BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                      {
                                                          { "InferAndValidate", true }
                                                      });

            networkOptions.push_back(shapeInferenceMethodOption);
        }
        if (m_Options.value().m_AllowExpandedDims)
        {
            BackendOptions shapeInferenceMethodOption("AllowExpandedDims",
                                                      {
                                                          { "AllowExpandedDims", true }
                                                      });

            networkOptions.push_back(shapeInferenceMethodOption);
        }
    }
    m_Network = INetwork::Create(networkOptions);

    if (m_Model.get() == nullptr)
    {
        throw ParseException(fmt::format("Tflite Model pointer is null {}", CHECK_LOCATION().AsString()));
    }

    // Only single-subgraph models are supported by this parser.
    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
                fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
                            m_Model->subgraphs.size(),
                            CHECK_LOCATION().AsString()));
    }

    // Indices kept outside the try block so the catch handler can report
    // exactly which operator/subgraph failed.
    size_t subgraphIndex = 0;
    size_t operatorIndex = 0;
    try
    {
        for (SubgraphPtr const& subgraph : m_Model->subgraphs)
        {
            // Tensor infos for graph inputs and constants must exist before
            // any operator parse function asks for them.
            SetupInputLayerTensorInfos(subgraphIndex);
            SetupConstantLayerTensorInfos(subgraphIndex);

            m_SubgraphConnections.emplace_back(subgraph->tensors.size());
            for (OperatorPtr const& op : subgraph->operators)
            {
                auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];

// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
#if defined(ARMNN_POST_TFLITE_2_3)
                auto builtinCode = std::max(opCodePtr->builtin_code,
                        static_cast<tflite::BuiltinOperator>(opCodePtr->deprecated_builtin_code));
#else
                auto builtinCode = opCodePtr->builtin_code;
#endif

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
                                                     "subgraph:{} operator idx:{}. {}",
                                                     builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
                                                     operatorIndex, CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto& parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
                ++operatorIndex;
            }

            // Graph-level layers are added after all operators so the
            // producer/consumer registrations above are complete.
            SetupInputLayers(subgraphIndex);
            SetupOutputLayers(subgraphIndex);
            SetupConstantLayers(subgraphIndex);

            ++subgraphIndex;
            operatorIndex = 0;
        }
    }
    catch (const ParseException& e)
    {
        // Re-throw with the failing operator/subgraph identified in the message.
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // A tensor with no producer recorded here is fed by an input or
            // constant layer handled elsewhere.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }
    return std::move(m_Network);
}
1088
Mike Kelly0506ef02023-01-03 16:29:44 +00001089bool TfLiteParserImpl::ShouldConstantTensorBeConverted(TfLiteParserImpl::TensorRawPtr tensorPtr,
1090 armnn::DataType inputDataType,
1091 armnn::DataType tensorDataType)
Mike Kelly5880b912022-01-28 16:18:54 +00001092{
Mike Kelly0506ef02023-01-03 16:29:44 +00001093 return (TfLiteParserImpl::IsConstTensor(tensorPtr) && inputDataType == DataType::Float32 &&
1094 (tensorDataType == DataType::QAsymmU8 ||
1095 tensorDataType == DataType::QAsymmS8 ||
1096 tensorDataType == DataType::QSymmS8 ||
1097 tensorDataType == DataType::Signed32 ||
1098 tensorDataType == DataType::Signed64));
Mike Kelly5880b912022-01-28 16:18:54 +00001099}
1100
Kevin May7d96b162021-02-03 17:38:41 +00001101void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
1102 size_t tensorIndex,
1103 armnn::IOutputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +01001104{
1105 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001106
1107 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
1108
Nikhil Rajd4d1c312022-08-03 18:20:59 +01001109 if (slot->GetOwningIConnectableLayer().GetType() != LayerType::Constant)
telsoa01c577f2c2018-08-31 09:22:23 +01001110 {
telsoa01c577f2c2018-08-31 09:22:23 +01001111
Nikhil Rajd4d1c312022-08-03 18:20:59 +01001112 // assuming there is only one producer for that tensor
1113 if (tensorSlots.outputSlot != nullptr)
1114 {
1115 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
1116 "subgraph:{} tensor:{} {}",
1117 subgraphIndex,
1118 tensorIndex,
1119 CHECK_LOCATION().AsString()));
1120 }
1121 }
telsoa01c577f2c2018-08-31 09:22:23 +01001122 tensorSlots.outputSlot = slot;
1123}
1124
Kevin May7d96b162021-02-03 17:38:41 +00001125void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
1126 size_t tensorIndex,
1127 armnn::IInputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +01001128{
1129 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001130
Finn Williamsd4fa5452021-03-01 12:31:41 +00001131 TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01001132 tensorSlots.inputSlots.push_back(slot);
1133}
1134
Kevin May7d96b162021-02-03 17:38:41 +00001135void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001136{
1137 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1138
1139 // NOTE: By default we presume the custom operator is not supported
Kevin May7d96b162021-02-03 17:38:41 +00001140 auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001141
1142 // Identify custom code defined for custom operator
1143 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1144 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
1145
Mike Kelly377fb212023-01-10 15:55:28 +00001146 // Find parser function that corresponds to custom code (if any)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001147 auto iterator = m_CustomParserFunctions.find(customCode);
1148 if (iterator != m_CustomParserFunctions.end())
1149 {
1150 customParserFunction = iterator->second;
1151 }
1152
1153 // Run parser function
1154 (this->*customParserFunction)(subgraphIndex, operatorIndex);
1155}
1156
Kevin May7d96b162021-02-03 17:38:41 +00001157void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001158{
1159 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001160
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001161 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1162
1163 auto opcodeIndex = operatorPtr->opcode_index;
Jim Flynnfca233e2021-09-23 12:16:53 +01001164
1165// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
Matthew Sloyan4d217c02021-10-07 11:48:58 +01001166#if defined(ARMNN_POST_TFLITE_2_3)
Jim Flynnfca233e2021-09-23 12:16:53 +01001167 auto opcode = std::max(m_Model->operator_codes[opcodeIndex]->builtin_code,
1168 static_cast<tflite::BuiltinOperator>(m_Model->operator_codes[opcodeIndex]->deprecated_builtin_code));
1169#else
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001170 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
Jim Flynnfca233e2021-09-23 12:16:53 +01001171#endif
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001172
1173 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
1174 {
1175 // Do not add StandInLayer, throw ParseException instead
1176 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001177 fmt::format("Operator not supported. "
1178 "subgraph:{} operator:{} "
1179 "opcode_index:{} opcode:{} / {} {}",
1180 subgraphIndex,
1181 operatorIndex,
1182 opcodeIndex,
1183 opcode,
1184 tflite::EnumNameBuiltinOperator(opcode),
1185 CHECK_LOCATION().AsString()));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001186 }
1187
1188 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1189 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1190
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001191 const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
1192 const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001193
1194 StandInDescriptor descriptor(numInputs, numOutputs);
James Ward58dec6b2020-09-11 17:32:44 +01001195 auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001196
1197 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
1198 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001199
1200 if (!layer)
1201 {
1202 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1203 operatorIndex, CHECK_LOCATION().AsString()));
1204 }
James Conroy05102392020-06-24 15:39:55 +01001205
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001206 for (unsigned int i = 0u; i < numOutputs; ++i)
1207 {
Mike Kelly04d82292023-01-19 18:29:40 +00001208 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[0], true));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001209 }
1210
1211 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1212 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1213
1214 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
1215 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +01001216}
1217
mathad01b392e982021-04-07 12:07:30 +01001218void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
1219{
1220 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1221
1222 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1223 CHECK_VALID_SIZE(inputs.size(), 1);
1224 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1225 CHECK_VALID_SIZE(outputs.size(), 1);
1226
1227 auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
1228
1229 IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001230
1231 if (!layer)
1232 {
1233 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1234 operatorIndex, CHECK_LOCATION().AsString()));
1235 }
mathad01b392e982021-04-07 12:07:30 +01001236
Mike Kelly377fb212023-01-10 15:55:28 +00001237 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
mathad01b392e982021-04-07 12:07:30 +01001238 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1239
1240 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1241 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1242
1243 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1244 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1245}
1246
Kevin May7d96b162021-02-03 17:38:41 +00001247void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001248{
1249 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1250
Mike Kelly0d77ae12022-01-07 17:42:27 +00001251 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1252 const auto* options = operatorPtr->builtin_options.AsConv2DOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01001253
1254 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1255
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001256 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1257 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1258 CHECK_VALID_SIZE(outputs.size(), 1);
1259
telsoa01c577f2c2018-08-31 09:22:23 +01001260 Convolution2dDescriptor desc;
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001261 inputs.size() == 3 ?
1262 desc.m_BiasEnabled = true : desc.m_BiasEnabled = false;
telsoa01c577f2c2018-08-31 09:22:23 +01001263 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1264 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +00001265 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +01001266 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1267 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +00001268
Mike Kelly377fb212023-01-10 15:55:28 +00001269 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1270 armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001271
1272 // assuming input is NHWC
1273 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001274 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001275
1276 // assuming the filter is OHWI : Output, H, W, Input
1277 // which is essentially the same as NHWC
1278 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001279 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001280
Pablo Tellof0bd6832019-04-26 17:58:13 +01001281 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1282 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1283 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1284 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +01001285
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001286 // Add the first input and weights tensor to the registration list.
1287 // The constant weights will be added by SetupConstantLayers.
1288 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1289 std::vector<unsigned int> tensorIndexesToRegister = { inputTensorIndexes[0], inputTensorIndexes[1] };
telsoa01c577f2c2018-08-31 09:22:23 +01001290
James Ward58dec6b2020-09-11 17:32:44 +01001291 auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001292 armnn::IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, layerName.c_str());
telsoa01c577f2c2018-08-31 09:22:23 +01001293
Mike Kelly0506ef02023-01-03 16:29:44 +00001294 if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
telsoa01c577f2c2018-08-31 09:22:23 +01001295 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001296 m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
telsoa01c577f2c2018-08-31 09:22:23 +01001297 }
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001298
1299 if (desc.m_BiasEnabled)
telsoa01c577f2c2018-08-31 09:22:23 +01001300 {
Mike Kelly377fb212023-01-10 15:55:28 +00001301 armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001302
1303 // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
1304 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
1305
Mike Kelly0506ef02023-01-03 16:29:44 +00001306 if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001307 {
1308 m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
1309 }
telsoa01c577f2c2018-08-31 09:22:23 +01001310 }
1311
Ryan OSheac229b3f2023-06-27 22:34:54 +01001312 if (!layer)
1313 {
1314 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1315 operatorIndex, CHECK_LOCATION().AsString()));
1316 }
telsoa01c577f2c2018-08-31 09:22:23 +01001317
Mike Kelly377fb212023-01-10 15:55:28 +00001318 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
jimfly01c25411c2018-11-14 17:47:22 +00001319 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001320
1321 // register the input connection slots for the layer, connections are made after all layers have been created
1322 // only the tensors for the inputs are relevant, exclude the const tensors
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001323 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
telsoa01c577f2c2018-08-31 09:22:23 +01001324
jimfly01c25411c2018-11-14 17:47:22 +00001325 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +01001326 // register the output connection slots for the layer, connections are made after all layers have been created
1327 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001328 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, { outputTensorIndexes[0] });
telsoa01c577f2c2018-08-31 09:22:23 +01001329}
1330
Matthew Sloyan4d217c02021-10-07 11:48:58 +01001331// Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
#if defined(ARMNN_POST_TFLITE_2_4)
// Parses a TfLite CONV_3D operator and adds an armnn Convolution3d layer to the network.
// Inputs: [0] activation tensor (assumed NDHWC), [1] weights (assumed DHWIO), optional [2] bias.
// The constant weights/bias are not baked into the layer here: their tensor ids are put on the
// registration list and the constant layers are created later by SetupConstantLayers.
void TfLiteParserImpl::ParseConv3D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsConv3DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution3dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_DataLayout = armnn::DataLayout::NDHWC;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_StrideZ = CHECKED_NON_NEGATIVE(options->stride_d);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
    desc.m_DilationZ = CHECKED_NON_NEGATIVE(options->dilation_d_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Assuming input is NDHWC
    unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
    unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Padding is derived per spatial axis from the TfLite padding scheme (SAME/VALID).
    CalcPadding(inputDepth, filterDepth, desc.m_StrideZ,
                desc.m_DilationZ, desc.m_PadFront, desc.m_PadBack, options->padding);
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // NOTE(review): the original called CreateConstTensorNonPermuted(inputs[1], ...) here but the
    // result was never used - the weights are connected via the registration list below and
    // materialised by SetupConstantLayers. The dead call (and its buffer copy) has been removed.

    auto layerName = fmt::format("Conv3D:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input and weights tensor to the registration list.
    // The constant weights will be added by SetupConstantLayers.
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;

        // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
    }

    armnn::IConnectableLayer* layer = m_Network->AddConvolution3dLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                   operatorIndex, CHECK_LOCATION().AsString()));
    }

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register the input connection slots for the layer, connections are made after all layers have been created
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
#endif
Matthew Sloyaneb5f8102021-10-05 17:31:42 +01001415
Kevin May7d96b162021-02-03 17:38:41 +00001416void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001417{
1418 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1419
Mike Kelly0d77ae12022-01-07 17:42:27 +00001420 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1421 const auto* options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01001422
1423 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1424
1425 DepthwiseConvolution2dDescriptor desc;
telsoa01c577f2c2018-08-31 09:22:23 +01001426 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1427 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +00001428 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matthew Jacksond6a9dee2019-07-22 13:53:24 +01001429 CHECKED_NON_NEGATIVE(options->depth_multiplier);
telsoa01c577f2c2018-08-31 09:22:23 +01001430
1431 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1432 CHECK_VALID_SIZE(inputs.size(), 2, 3);
Cathal Corbett06902652022-04-14 17:55:11 +01001433 if (inputs.size() == 3)
1434 {
1435 desc.m_BiasEnabled = true;
1436 }
1437
telsoa01c577f2c2018-08-31 09:22:23 +01001438 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1439 CHECK_VALID_SIZE(outputs.size(), 1);
Pablo Tellof0bd6832019-04-26 17:58:13 +01001440 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1441 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +00001442
Mike Kelly377fb212023-01-10 15:55:28 +00001443 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1444 armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001445
Matteo Martincigh747ef822018-12-18 09:26:39 +00001446 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +01001447 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1448 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +00001449
1450 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +01001451 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1452 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1453
Pablo Tellof0bd6832019-04-26 17:58:13 +01001454 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1455 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1456 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1457 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +01001458
Jan Eilers53ef7952021-06-02 12:01:25 +01001459 // ArmNN uses the same filter tensor layout at TfLite [1, H, W, O] no need for any permutation
James Ward58dec6b2020-09-11 17:32:44 +01001460 auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001461
Cathal Corbett06902652022-04-14 17:55:11 +01001462 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1463 // Add the first input and weights tensor to the registration list.
1464 // The constant weights will be added by SetupConstantLayers.
1465 std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};
1466
1467 armnn::IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, layerName.c_str());
1468
1469 if (desc.m_BiasEnabled)
telsoa01c577f2c2018-08-31 09:22:23 +01001470 {
1471 desc.m_BiasEnabled = true;
Mike Kelly377fb212023-01-10 15:55:28 +00001472 TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Cathal Corbett06902652022-04-14 17:55:11 +01001473
1474 // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
1475 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
telsoa01c577f2c2018-08-31 09:22:23 +01001476 }
Ryan OSheac229b3f2023-06-27 22:34:54 +01001477
1478 if (!layer)
1479 {
1480 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1481 operatorIndex, CHECK_LOCATION().AsString()));
1482 }
telsoa01c577f2c2018-08-31 09:22:23 +01001483
Mike Kelly377fb212023-01-10 15:55:28 +00001484 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
jimfly01c25411c2018-11-14 17:47:22 +00001485 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001486
1487 // register the input connection slots for the layer, connections are made after all layers have been created
1488 // only the tensors for the inputs are relevant, exclude the const tensors
Cathal Corbett06902652022-04-14 17:55:11 +01001489 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
telsoa01c577f2c2018-08-31 09:22:23 +01001490
jimfly01c25411c2018-11-14 17:47:22 +00001491 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +01001492 // register the output connection slots for the layer, connections are made after all layers have been created
1493 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1494 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1495}
1496
Kevin May7d96b162021-02-03 17:38:41 +00001497void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsed66d142019-12-06 09:55:55 +00001498{
1499 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1500
1501 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1502 CHECK_VALID_SIZE(inputs.size(), 1);
1503
1504 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1505 CHECK_VALID_SIZE(outputs.size(), 1);
1506
James Ward58dec6b2020-09-11 17:32:44 +01001507 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +00001508
1509 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001510
1511 if (!layer)
1512 {
1513 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1514 operatorIndex, CHECK_LOCATION().AsString()));
1515 }
Finn Williamsed66d142019-12-06 09:55:55 +00001516
Mike Kelly377fb212023-01-10 15:55:28 +00001517 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Finn Williamsed66d142019-12-06 09:55:55 +00001518 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1519
1520 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1521 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1522
1523 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1524 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1525}
1526
Teresa Charlin3ab85482021-06-08 16:59:29 +01001527void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorIndex)
1528{
1529 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1530
1531 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1532 CHECK_VALID_SIZE(inputs.size(), 2);
1533
1534 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1535 CHECK_VALID_SIZE(outputs.size(), 1);
1536
1537 auto layerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
1538
Mike Kelly377fb212023-01-10 15:55:28 +00001539 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001540 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001541 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1542
Teresa Charlina7a605a2023-06-14 14:51:17 +01001543 armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
1544
1545 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1546 if (axisBufferPtr == nullptr)
1547 {
1548 throw ParseException(fmt::format("{}: Operation has invalid inputs. Failed to read axis.",
1549 CHECK_LOCATION().AsString()));
1550 }
1551
1552 std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
1553 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
1554 int32_t axis = axisData[0];
1555
1556 auto inputRank = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
1557 auto outputRank = inputRank + 1;
1558 if((axis < -1 * outputRank) || (outputRank <= axis))
1559 {
1560 throw ParseException(fmt::format("{}: Axis {} is not within [-{}, {}) range.",
1561 CHECK_LOCATION().AsString(), axis, outputRank, outputRank));
1562 }
1563
1564 axis = axis < 0 ? (axis + outputRank) : axis;
1565
1566 std::vector<unsigned int> shape(static_cast<unsigned int>(outputRank));
1567 unsigned int inputShapeIndex = 0;
1568 for (unsigned int i = 0; i < static_cast<unsigned int>(outputRank); ++i)
1569 {
1570 if (i == static_cast<unsigned int>(axis))
1571 {
1572 shape[i] = 1;
1573 }
1574 else
1575 {
1576 shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
1577 ++inputShapeIndex;
1578 }
1579 }
1580
Teresa Charlin3ab85482021-06-08 16:59:29 +01001581 ReshapeDescriptor reshapeDesc;
Teresa Charlina7a605a2023-06-14 14:51:17 +01001582 reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(outputRank), shape.data());
1583 outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001584
1585 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001586
1587 if (!layer)
1588 {
1589 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1590 operatorIndex, CHECK_LOCATION().AsString()));
1591 } layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001592
Teresa Charlina7a605a2023-06-14 14:51:17 +01001593 auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
1594 m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;
1595
Teresa Charlin3ab85482021-06-08 16:59:29 +01001596 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1597 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1598
1599 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1600 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1601}
1602
Kevin May7d96b162021-02-03 17:38:41 +00001603void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
Keith Davis4cd29a02019-09-09 14:49:20 +01001604{
1605 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1606
1607 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001608 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001609
1610 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1611 CHECK_VALID_SIZE(outputs.size(), 1);
1612
James Ward58dec6b2020-09-11 17:32:44 +01001613 auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly08759e22020-03-02 11:41:31 +00001614 TransposeDescriptor desc;
Keith Davis4cd29a02019-09-09 14:49:20 +01001615
josh minorba424d22019-11-13 10:55:17 -06001616 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001617 {
Mike Kelly377fb212023-01-10 15:55:28 +00001618 armnn::TensorInfo permuteTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Kevin May85d92602019-09-27 17:21:06 +01001619 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001620 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1621 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001622 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
Mike Kelly08759e22020-03-02 11:41:31 +00001623 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
Kevin May85d92602019-09-27 17:21:06 +01001624
Mike Kelly08759e22020-03-02 11:41:31 +00001625 desc = TransposeDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001626 }
Mike Kelly377fb212023-01-10 15:55:28 +00001627 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Keith Davis4cd29a02019-09-09 14:49:20 +01001628
James Conroy05102392020-06-24 15:39:55 +01001629 IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001630
1631 if (!layer)
1632 {
1633 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1634 operatorIndex, CHECK_LOCATION().AsString()));
1635 }
Mike Kelly377fb212023-01-10 15:55:28 +00001636
1637 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1638 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Keith Davis4cd29a02019-09-09 14:49:20 +01001639 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1640
1641 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1642 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1643
1644 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1645 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1646}
1647
// Parses a TfLite TRANSPOSE_CONV operator and adds an armnn TransposeConvolution2d layer.
// Inputs: [0] output-shape tensor, [1] weights, [2] activation tensor, optional [3] bias.
// Unlike the forward convolutions above, the weights (and bias) are embedded directly in the
// layer here rather than being routed through SetupConstantLayers.
void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // A fourth input means a bias tensor is present.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    if (inputs.size() == 4)
    {
        desc.m_BiasEnabled = true;
    }
    else
    {
        CHECK_VALID_SIZE(inputs.size(), 3);
    }

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);


    // Note: the activation tensor is input 2 (input 0 is the output-shape tensor).
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // This block determines the output shape of the transpose convolution. If the output shape tensor ptr is not null
    // And the tensor is a constant, we can access the data at load time and set the output shape of the
    // layer. If this is not constant, We do not have access to the shape data, so we have to use
    // infer output shape and skip this code block.
    if (inputs[0] && IsConstTensor(inputs[0]))
    {
        armnn::TensorInfo tensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
        std::vector<int> output_shape(tensorInfo.GetNumElements());

        // NOTE(review): only Signed32 and QAsymmU8 shape tensors are handled; any other dtype
        // silently leaves output_shape zero-initialised - confirm whether this should throw.
        if (tensorInfo.GetDataType() == DataType::Signed32)
        {
            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
        }
        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
        {
            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
            {
                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
            }
        }
        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : output_shape)
        {
            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        desc.m_OutputShapeEnabled = true;

        // TfLite uses NHWC tensors
        const unsigned int outputHeight = desc.m_OutputShape[1];
        const unsigned int outputWidth  = desc.m_OutputShape[2];

        // With a known output size, padding can be computed exactly for each spatial axis.
        CalcPadding(inputHeight,
                    filterHeight,
                    desc.m_StrideY,
                    1, // DilationY
                    desc.m_PadTop,
                    desc.m_PadBottom,
                    options->padding,
                    outputHeight);

        CalcPadding(inputWidth,
                    filterWidth,
                    desc.m_StrideX,
                    1, // DilationX
                    desc.m_PadLeft,
                    desc.m_PadRight,
                    options->padding,
                    outputWidth);
    }
    else
    {
        // Output size unknown at parse time: fall back to the output-less padding calculation.
        CalcPadding(inputHeight,
                    filterHeight,
                    desc.m_StrideY,
                    1, // DilationY
                    desc.m_PadTop,
                    desc.m_PadBottom,
                    options->padding);

        CalcPadding(inputWidth,
                    filterWidth,
                    desc.m_StrideX,
                    1, // DilationX
                    desc.m_PadLeft,
                    desc.m_PadRight,
                    options->padding);
    }

    // Weights (and bias below) may be converted to the activation data type if needed.
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);

    if (desc.m_BiasEnabled)
    {
        auto biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
        auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo, inputTensorInfo.GetDataType());
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasConstTensor.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                   operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output info is inferred from the activation (2) and weights (1) inputs.
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0 , { 2, 1 });
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1791
// Parses a TfLite AVERAGE_POOL_2D operator by delegating to the shared pooling
// handler with the Average algorithm selected.
void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1796
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001797void TfLiteParserImpl::ParseBatchMatMul(size_t subgraphIndex, size_t operatorIndex)
1798{
1799 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1800
1801 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1802 CHECK_VALID_SIZE(inputs.size(), 2);
1803
1804 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1805 CHECK_VALID_SIZE(outputs.size(), 1);
1806
1807 auto layerName = fmt::format("BatchMatMul:{}:{}", subgraphIndex, operatorIndex);
1808
Mike Kelly377fb212023-01-10 15:55:28 +00001809 TensorInfo inputXTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1810 TensorInfo inputYTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001811
1812 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1813 const auto* options = operatorPtr->builtin_options.AsBatchMatMulOptions();
1814
Teresa Charlinbc37a6b2022-09-22 10:12:58 +01001815 // Adjoint in tensorflow lite performs transpose operation
1816 BatchMatMulDescriptor descriptor(options->adj_x,
1817 options->adj_y,
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001818 false,
Teresa Charlinbc37a6b2022-09-22 10:12:58 +01001819 false);
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001820 // Arbitrary DataLayout
1821
1822 IConnectableLayer* layer = m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001823
1824 if (!layer)
1825 {
1826 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1827 operatorIndex, CHECK_LOCATION().AsString()));
1828 }
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001829
Mike Kelly377fb212023-01-10 15:55:28 +00001830 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001831 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1832
1833 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1834 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1835
1836 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1837 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1838}
1839
/// Parses a TFLite BATCH_TO_SPACE_ND operator into an ArmNN BatchToSpaceNd layer.
/// Inputs: 0 = data tensor, 1 = constant block-shape tensor, 2 = constant crops tensor.
/// Produces one output. Block shape and crops must be constant (read from model buffers).
void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Block shape (input 1) and crops (input 2) are compile-time constants:
    // copy them straight out of the flatbuffer buffers.
    armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo cropsTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());

    // The flat crops buffer holds (begin, end) pairs, one pair per spatial dimension.
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }

    armnn::BatchToSpaceNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_Crops = crops;
    desc.m_DataLayout = armnn::DataLayout::NHWC;  // TFLite tensors are NHWC

    auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);

    TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output info is derived from the layer itself so dynamic shapes are inferred;
    // input and output must share quantization parameters.
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor (input 0) becomes a network connection; the constant
    // block-shape/crops inputs were consumed above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1896
Kevin May7d96b162021-02-03 17:38:41 +00001897void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001898{
1899 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1900
1901 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1902 CHECK_VALID_SIZE(inputs.size(), 1);
1903
1904 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1905 CHECK_VALID_SIZE(outputs.size(), 1);
1906
1907 L2NormalizationDescriptor desc;
1908 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001909 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001910 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1911
Ryan OSheac229b3f2023-06-27 22:34:54 +01001912 if (!layer)
1913 {
1914 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1915 operatorIndex, CHECK_LOCATION().AsString()));
1916 }
Matthew Jackson28c94572019-07-18 10:47:03 +01001917
Mike Kelly377fb212023-01-10 15:55:28 +00001918 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Jackson28c94572019-07-18 10:47:03 +01001919 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1920
1921 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1922 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1923
1924 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1925 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1926}
1927
/// Parses a TFLite MAX_POOL_2D operator.
/// Max pooling shares the generic 2D pooling handling; only the algorithm differs.
void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1932
Kevin May7d96b162021-02-03 17:38:41 +00001933void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001934{
1935 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1936
1937 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1938 CHECK_VALID_SIZE(inputs.size(), 2);
1939
1940 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1941 CHECK_VALID_SIZE(outputs.size(), 1);
1942
James Ward58dec6b2020-09-11 17:32:44 +01001943 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001944
Mike Kelly377fb212023-01-10 15:55:28 +00001945 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1946 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
James Conroy05102392020-06-24 15:39:55 +01001947 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001948
Mike Kelly3ec30772023-03-08 13:47:17 +00001949 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Maximum, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001950
1951 if (!layer)
1952 {
1953 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1954 operatorIndex, CHECK_LOCATION().AsString()));
1955 }
Mike Kelly377fb212023-01-10 15:55:28 +00001956
1957 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
1958 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001959 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1960
1961 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001962 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001963
1964 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1965 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1966}
1967
Kevin May7d96b162021-02-03 17:38:41 +00001968void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001969{
1970 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1971
1972 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1973 CHECK_VALID_SIZE(inputs.size(), 2);
1974
1975 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1976 CHECK_VALID_SIZE(outputs.size(), 1);
1977
James Ward58dec6b2020-09-11 17:32:44 +01001978 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001979
Mike Kelly377fb212023-01-10 15:55:28 +00001980 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1981 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
James Conroy05102392020-06-24 15:39:55 +01001982 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001983
Mike Kelly3ec30772023-03-08 13:47:17 +00001984 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Minimum, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01001985
1986 if (!layer)
1987 {
1988 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
1989 operatorIndex, CHECK_LOCATION().AsString()));
1990 }
Mike Kelly377fb212023-01-10 15:55:28 +00001991
1992 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
1993 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001994 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1995
1996 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001997 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001998
1999 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2000 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2001}
2002
/// Shared handler for TFLite AVERAGE_POOL_2D / MAX_POOL_2D operators.
/// Builds a Pooling2d layer (NHWC), computes SAME/VALID padding from the options,
/// and appends a fused activation layer if the operator requests one.
/// @param algorithm selects Average or Max pooling; anything else throws.
void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
                                 size_t operatorIndex,
                                 PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    // The layer name encodes which pooling flavour this operator is.
    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
            break;
        default:
            throw ParseException(fmt::format("Unsupported Pooling Algorithm {}", CHECK_LOCATION().AsString()));
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    // Exclude padding values from averages, matching TFLite semantics.
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // Translate the TFLite SAME/VALID padding scheme into explicit pad values
    // (dilation is fixed at 1 for pooling).
    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // If the operator carries a fused activation, AddFusedActivationLayer returns the
    // activation layer, so the output slots below are registered against it instead.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2079
Kevin May7d96b162021-02-03 17:38:41 +00002080void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
josh minorba424d22019-11-13 10:55:17 -06002081{
2082 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2083
2084 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2085 CHECK_VALID_SIZE(inputs.size(), 3);
2086 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2087 CHECK_VALID_SIZE(outputs.size(), 1);
2088
2089 SliceDescriptor desc;
2090
2091 // set begin tensor info for slice descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00002092 armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
josh minorba424d22019-11-13 10:55:17 -06002093 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2094
2095 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
2096 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
2097
2098 // set size tensor info for slice descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00002099 armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
josh minorba424d22019-11-13 10:55:17 -06002100 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
2101
Cathal Corbettde33dda2022-09-20 16:40:09 +01002102 std::vector<int> signedSize(sizeTensorInfo.GetNumElements(), 1);
2103
2104 // if size buffer data is not specified, all contents of size vector remain as values of 1
2105 if (sizeBufferPtr->data.data())
2106 {
2107 ::memcpy(signedSize.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2108 }
2109
josh minorba424d22019-11-13 10:55:17 -06002110 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
Mike Kelly377fb212023-01-10 15:55:28 +00002111 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly7ba84d62021-09-10 15:27:19 +01002112
2113 for (unsigned int i = 0; i < signedSize.size(); ++i)
2114 {
2115 int signedValue = signedSize[i];
Jim Flynnfca233e2021-09-23 12:16:53 +01002116
Mike Kelly7ba84d62021-09-10 15:27:19 +01002117 if (signedValue < -1 || signedValue > static_cast<int>(inputTensorInfo.GetShape()[i] - begin[i]))
2118 {
2119 throw ParseException(fmt::format("Invalid value for size {} size must be in range "
2120 "[-1, inputDimSize - begin] [-1, {}] inclusive {}",
2121 signedValue,
2122 inputTensorInfo.GetShape()[i] - begin[i],
2123 CHECK_LOCATION().AsString()));
2124 }
2125
2126 if (signedValue == -1)
2127 {
2128 size[i] = inputTensorInfo.GetShape()[i] - begin[i];
2129 }
2130 else
2131 {
2132 size[i] = static_cast<unsigned int>(signedValue);
2133 }
2134 }
2135
josh minorba424d22019-11-13 10:55:17 -06002136 desc = SliceDescriptor(begin, size);
2137
James Ward58dec6b2020-09-11 17:32:44 +01002138 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06002139
James Conroy05102392020-06-24 15:39:55 +01002140 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
Mike Kelly377fb212023-01-10 15:55:28 +00002141
2142 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2143 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
josh minorba424d22019-11-13 10:55:17 -06002144 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2145
2146 // register the input connection slots for the layer, connections are made after all layers have been created
2147 // only the tensors for the inputs are relevant, exclude the const tensors
2148 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2149 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2150
2151 // register the output connection slots for the layer, connections are made after all layers have been created
2152 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2153 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2154}
2155
Kevin May7d96b162021-02-03 17:38:41 +00002156void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01002157{
2158 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00002159 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2160 const auto* options = operatorPtr->builtin_options.AsSoftmaxOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01002161
2162 SoftmaxDescriptor desc;
2163 desc.m_Beta = options->beta;
2164
2165 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2166 CHECK_VALID_SIZE(inputs.size(), 1);
2167 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2168 CHECK_VALID_SIZE(outputs.size(), 1);
2169
James Ward58dec6b2020-09-11 17:32:44 +01002170 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01002171 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
2172
Mike Kelly377fb212023-01-10 15:55:28 +00002173 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
telsoa01c577f2c2018-08-31 09:22:23 +01002174 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2175
2176 // register the input connection slots for the layer, connections are made after all layers have been created
2177 // only the tensors for the inputs are relevant, exclude the const tensors
2178 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2179 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2180
2181 // register the output connection slots for the layer, connections are made after all layers have been created
2182 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2183 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2184}
2185
Teresa Charlinfd33a692022-06-29 15:35:57 +01002186void TfLiteParserImpl::ParseLogSoftmax(size_t subgraphIndex, size_t operatorIndex)
2187{
2188 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2189
2190 LogSoftmaxDescriptor desc;
2191
2192 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2193 CHECK_VALID_SIZE(inputs.size(), 1);
2194 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2195 CHECK_VALID_SIZE(outputs.size(), 1);
2196
2197 auto layerName = fmt::format("LogSoftmax:{}:{}", subgraphIndex, operatorIndex);
2198 IConnectableLayer* const layer = m_Network->AddLogSoftmaxLayer(desc, layerName.c_str());
2199
Mike Kelly377fb212023-01-10 15:55:28 +00002200 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Teresa Charlinfd33a692022-06-29 15:35:57 +01002201 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2202
2203 // register the input connection slots for the layer, connections are made after all layers have been created
2204 // only the tensors for the inputs are relevant, exclude the const tensors
2205 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2206 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2207
2208 // register the output connection slots for the layer, connections are made after all layers have been created
2209 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2210 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2211}
2212
/// Parses a TFLite SPACE_TO_BATCH_ND operator into an ArmNN SpaceToBatchNd layer.
/// Inputs: 0 = data tensor, 1 = constant block-shape tensor, 2 = constant paddings tensor.
/// Produces one output. Block shape and paddings must be constant (read from model buffers).
void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Block shape (input 1) and paddings (input 2) are compile-time constants:
    // copy them straight out of the flatbuffer buffers.
    armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    // The flat paddings buffer holds (before, after) pairs, one pair per spatial dimension.
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;  // TFLite tensors are NHWC

    auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);

    TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output info is derived from the layer itself so dynamic shapes are inferred;
    // input and output must share quantization parameters.
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor (input 0) becomes a network connection; the constant
    // block-shape/paddings inputs were consumed above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2269
Teresa Charlin2a764ad2023-02-24 18:17:31 +00002270void TfLiteParserImpl::ParseSpaceToDepth(size_t subgraphIndex, size_t operatorIndex)
2271{
2272 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2273
2274 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2275 CHECK_VALID_SIZE(inputs.size(), 1);
2276 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2277 CHECK_VALID_SIZE(outputs.size(), 1);
2278
2279 armnn::SpaceToDepthDescriptor descriptor;
2280
2281 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2282 const auto* options = operatorPtr->builtin_options.AsSpaceToDepthOptions();
2283 auto blockSize = options->block_size;
2284 if (blockSize < 2)
2285 {
2286 throw ParseException(
2287 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
2288 blockSize,
2289 CHECK_LOCATION().AsString()));
2290 }
2291 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
2292
2293 auto layerName = fmt::format("SpaceToDepth:{}:{}", subgraphIndex, operatorIndex);
2294 IConnectableLayer* layer = m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002295
2296 if (!layer)
2297 {
2298 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2299 operatorIndex, CHECK_LOCATION().AsString()));
2300 }
2301
Teresa Charlin2a764ad2023-02-24 18:17:31 +00002302 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2303 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2304
2305 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2306 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2307
2308 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2309 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2310}
2311
Teresa Charlin3ab85482021-06-08 16:59:29 +01002312armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
Mike Kelly0d77ae12022-01-07 17:42:27 +00002313 const armnn::TensorInfo& inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01002314{
Teresa Charlin3ab85482021-06-08 16:59:29 +01002315 CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
telsoa01c577f2c2018-08-31 09:22:23 +01002316 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2317
2318 if (inputTensorInfo.GetNumDimensions() > 4)
2319 {
2320 std::stringstream ss;
2321 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2322 << " shape:" << inputTensorInfo.GetShape() << " "
2323 << CHECK_LOCATION().AsString();
2324 throw ParseException(ss.str());
2325 }
2326
2327 if (squeezeDims.empty())
2328 {
2329 squeezeDims.assign(dimensionSequence,
2330 dimensionSequence+inputTensorInfo.GetNumDimensions());
2331 }
2332
2333 std::vector<uint32_t> outputDims;
2334 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2335 {
2336 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2337 auto currentDimension = inputTensorInfo.GetShape()[i];
2338 if (skipSqueeze || currentDimension != 1)
2339 {
2340 outputDims.push_back(currentDimension);
2341 }
2342 }
2343
2344 if (outputDims.size() > 4)
2345 {
2346 std::stringstream ss;
2347 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2348 << " shape:" << inputTensorInfo.GetShape() << " "
2349 << CHECK_LOCATION().AsString();
2350 throw ParseException(ss.str());
2351 }
2352
2353 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2354 outputDims.data());
2355
2356 // we need to preserve the tensor type and the quantization data as well
2357 TensorInfo outTensorInfo = inputTensorInfo;
2358 outTensorInfo.SetShape(outShape);
2359
2360 return outTensorInfo;
2361}
2362
Keith Davis0176fd82021-06-01 17:36:32 +01002363void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
2364{
2365 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2366
2367 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2368 CHECK_VALID_SIZE(inputs.size(), 1);
2369 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2370 CHECK_VALID_SIZE(outputs.size(), 1);
2371
2372 auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
2373
2374 IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002375
2376 if (!layer)
2377 {
2378 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2379 operatorIndex, CHECK_LOCATION().AsString()));
2380 }
Keith Davis0176fd82021-06-01 17:36:32 +01002381
Mike Kelly377fb212023-01-10 15:55:28 +00002382 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Keith Davis0176fd82021-06-01 17:36:32 +01002383 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2384
2385 // Check if output tensor type is Signed32 or Signed64
2386 if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
2387 outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
2388 {
2389 throw ParseException(
2390 fmt::format(
2391 "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
2392 CHECK_LOCATION().AsString()));
2393 }
2394
2395 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2396 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2397
2398 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2399 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2400}
2401
/// Parses a TFLite SQUEEZE operator. ArmNN has no dedicated squeeze layer, so the
/// operator is implemented as a Reshape to the squeezed shape computed by
/// OutputShapeOfSqueeze.
void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
    auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    std::vector<uint32_t> squeezeDim;
    // A single negative dim index is interpreted as a negative index in python
    // Meaning the index will be the shape size plus the negative index value
    if (options->squeeze_dims.size() == 1 && options->squeeze_dims[0] < 0)
    {
        int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
        squeezeDim.push_back(static_cast<uint32_t>(dim));
    }
    else
    {
        squeezeDim = AsUnsignedVector(options->squeeze_dims);
    }

    // Compute the squeezed output shape (preserves type and quantization).
    armnn::TensorInfo outputTensorInfo = TfLiteParserImpl::OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);

    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    // Record the computed output info in the parser's tensor-info cache so later
    // operators that consume this tensor see the squeezed shape.
    auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
    m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2457
Kevin May7d96b162021-02-03 17:38:41 +00002458void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002459{
2460 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2461
2462 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2463 CHECK_VALID_SIZE(inputs.size(), 4);
2464
2465 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2466 CHECK_VALID_SIZE(outputs.size(), 1);
2467
Mike Kelly0d77ae12022-01-07 17:42:27 +00002468 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2469 const auto* options = operatorPtr->builtin_options.AsStridedSliceOptions();
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002470
2471 StridedSliceDescriptor desc;
2472 desc.m_BeginMask = options->begin_mask;
2473 desc.m_EllipsisMask = options->ellipsis_mask;
2474 desc.m_EndMask = options->end_mask;
2475 desc.m_NewAxisMask = options->new_axis_mask;
2476 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
2477 desc.m_DataLayout = armnn::DataLayout::NHWC;
2478
Mike Kelly377fb212023-01-10 15:55:28 +00002479 armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002480 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2481
2482 std::vector<int> begin(beginTensorInfo.GetNumElements());
David Monahan39085f72023-07-28 11:37:29 +01002483 if (beginBufferPtr->data.data() != nullptr)
2484 {
2485 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
2486 }
2487 else
2488 {
2489 throw ParseException("ParseStridedSlice: Invalid input - the begin vector is null");
2490 }
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002491
Mike Kelly377fb212023-01-10 15:55:28 +00002492 armnn::TensorInfo endTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002493 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
2494
2495 std::vector<int> end(endTensorInfo.GetNumElements());
David Monahan39085f72023-07-28 11:37:29 +01002496 if (endBufferPtr->data.data() != nullptr)
2497 {
2498 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
2499 }
2500 else
2501 {
2502 throw ParseException("ParseStridedSlice: Invalid input - the end vector is null");
2503 }
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002504
Mike Kelly377fb212023-01-10 15:55:28 +00002505 armnn::TensorInfo strideTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002506 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
2507
2508 std::vector<int> stride(strideTensorInfo.GetNumElements());
David Monahan39085f72023-07-28 11:37:29 +01002509
2510 if (strideBufferPtr->data.data() != nullptr)
2511 {
2512 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
2513 }
2514 else
2515 {
2516 throw ParseException("ParseStridedSlice: Invalid input - the stride vector is null");
2517 }
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002518
2519 desc.m_Begin = begin;
2520 desc.m_End = end;
2521 desc.m_Stride = stride;
2522
James Ward58dec6b2020-09-11 17:32:44 +01002523 auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002524 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002525
2526 if (!layer)
2527 {
2528 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2529 operatorIndex, CHECK_LOCATION().AsString()));
2530 }
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002531
Mike Kelly377fb212023-01-10 15:55:28 +00002532 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Bruno Goncalves451d95b2019-02-12 22:59:22 -02002533 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2534
2535 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2536 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2537
2538 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2539 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2540}
2541
Kevin May7d96b162021-02-03 17:38:41 +00002542void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002543{
2544 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2545
Mike Kelly0d77ae12022-01-07 17:42:27 +00002546 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2547 const auto* options = operatorPtr->builtin_options.AsSubOptions();
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002548
2549 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2550 CHECK_VALID_SIZE(inputs.size(), 2);
2551
2552 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2553 CHECK_VALID_SIZE(outputs.size(), 1);
2554
Mike Kelly377fb212023-01-10 15:55:28 +00002555 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2556 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002557
James Ward58dec6b2020-09-11 17:32:44 +01002558 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002559 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Sub, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002560
2561 if (!layer)
2562 {
2563 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2564 operatorIndex, CHECK_LOCATION().AsString()));
2565 }
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002566
Mike Kelly377fb212023-01-10 15:55:28 +00002567 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002568 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2569
2570 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002571 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Teresa Charlind04873f2023-05-23 14:16:28 +01002572 if (options)
2573 {
2574 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2575 }
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002576
2577 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2578 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2579}
2580
Kevin May7d96b162021-02-03 17:38:41 +00002581void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302582{
2583 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2584
Mike Kelly0d77ae12022-01-07 17:42:27 +00002585 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2586 const auto* options = operatorPtr->builtin_options.AsDivOptions();
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302587
2588 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2589 CHECK_VALID_SIZE(inputs.size(), 2);
2590
2591 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2592 CHECK_VALID_SIZE(outputs.size(), 1);
2593
Mike Kelly377fb212023-01-10 15:55:28 +00002594 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2595 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302596
James Ward58dec6b2020-09-11 17:32:44 +01002597 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002598 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002599
2600 if (!layer)
2601 {
2602 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2603 operatorIndex, CHECK_LOCATION().AsString()));
2604 }
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302605
Mike Kelly377fb212023-01-10 15:55:28 +00002606 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302607 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2608
2609 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002610 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Teresa Charlind04873f2023-05-23 14:16:28 +01002611 if (options)
2612 {
2613 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2614 }
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302615
2616 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2617 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2618}
2619
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002620void TfLiteParserImpl::ParseFloorDiv(size_t subgraphIndex, size_t operatorIndex)
2621{
2622 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2623
2624 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2625 CHECK_VALID_SIZE(inputs.size(), 2);
2626
2627 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2628 CHECK_VALID_SIZE(outputs.size(), 1);
2629
Mike Kelly377fb212023-01-10 15:55:28 +00002630 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2631 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002632
2633 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002634 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002635
2636 if (!layer)
2637 {
2638 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2639 operatorIndex, CHECK_LOCATION().AsString()));
2640 }
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002641
Mike Kelly377fb212023-01-10 15:55:28 +00002642 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002643 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2644
2645 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2646 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2647 layer = AddFusedFloorLayer(layer, 0);
2648
2649 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2650 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2651}
2652
Kevin May7d96b162021-02-03 17:38:41 +00002653void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002654{
2655 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2656
Mike Kelly0d77ae12022-01-07 17:42:27 +00002657 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2658 const auto* options = operatorPtr->builtin_options.AsAddOptions();
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002659
2660 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2661 CHECK_VALID_SIZE(inputs.size(), 2);
2662
2663 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2664 CHECK_VALID_SIZE(outputs.size(), 1);
2665
Mike Kelly377fb212023-01-10 15:55:28 +00002666 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2667 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002668
James Ward58dec6b2020-09-11 17:32:44 +01002669 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002670 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Add, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002671
2672 if (!layer)
2673 {
2674 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2675 operatorIndex, CHECK_LOCATION().AsString()));
2676 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002677
Mike Kelly377fb212023-01-10 15:55:28 +00002678 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002679 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2680
2681 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002682 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Teresa Charlind04873f2023-05-23 14:16:28 +01002683 if (options)
2684 {
2685 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2686 }
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002687
2688 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2689 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2690}
2691
Kevin May7d96b162021-02-03 17:38:41 +00002692void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002693{
2694 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2695
Mike Kelly0d77ae12022-01-07 17:42:27 +00002696 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2697 const auto* options = operatorPtr->builtin_options.AsMulOptions();
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002698
2699 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2700 CHECK_VALID_SIZE(inputs.size(), 2);
2701
2702 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2703 CHECK_VALID_SIZE(outputs.size(), 1);
2704
Mike Kelly377fb212023-01-10 15:55:28 +00002705 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2706 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002707
James Ward58dec6b2020-09-11 17:32:44 +01002708 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002709 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Mul, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002710
2711 if (!layer)
2712 {
2713 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2714 operatorIndex, CHECK_LOCATION().AsString()));
2715 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002716
Mike Kelly377fb212023-01-10 15:55:28 +00002717 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002718 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2719
2720 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002721 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Teresa Charlind04873f2023-05-23 14:16:28 +01002722 if (options)
2723 {
2724 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2725 }
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002726
2727 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2728 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2729}
2730
/// Parses a TfLite MEAN operator and adds a Mean layer to the network.
/// Input 0 is the data tensor; input 1 (optional data) holds the reduction axes.
/// @throws NullPointerException if layer creation fails.
void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    TensorInfo dimTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    armnn::MeanDescriptor desc;
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    // Get const axis value from model and set it to descriptor.
    if (axisBufferPtr != nullptr)
    {
        std::vector<int32_t> axisData(dimTensorInfo.GetNumElements());
        ::memcpy(axisData.data(), axisBufferPtr->data.data(), dimTensorInfo.GetNumBytes());

        // Convert the axis to unsigned int and remove duplicates.
        // (i + rank) % rank maps negative axes (python-style) into [0, rank);
        // the std::set sorts and de-duplicates in one pass.
        auto rank = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
        std::set<unsigned int> uniqueAxis;
        std::transform(axisData.begin(),
                       axisData.end(),
                       std::inserter(uniqueAxis, uniqueAxis.begin()),
                       [rank](int i)->unsigned int{
                           return static_cast<uint32_t>(((i + rank) % rank)); });
        desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    }
    else
    {
        // No axis tensor data: reduce over every dimension of the input.
        for (uint32_t i = 0; i < inputTensorInfo.GetNumDimensions(); ++i)
        {
            desc.m_Axis.push_back(i);
        }
    }

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);

    // If the output rank matches the input rank, the reduced dims were kept as 1s.
    desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ? true : false;

    auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Re-derive the output info through shape inference before wiring slots.
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2791
/// Parses a TfLite PAD or PADV2 operator and adds a Pad layer to the network.
/// PAD takes 2 inputs (data, paddings); PADV2 takes 3 (data, paddings, pad value).
/// For quantized inputs with no explicit pad value, the quantization zero-point
/// is used so the padding represents real-value zero.
/// @throws NullPointerException if layer creation fails.
void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    std::vector<unsigned int> padBuffer = GetUIntBuffer(padTensorInfo, m_Model, inputs[1]->buffer);

    // The paddings tensor holds (before, after) pairs, one pair per dimension.
    size_t step = 2;
    armnn::PadDescriptor desc;
    auto opcode = GetOpCode(m_Model, subgraphIndex, operatorIndex);

    if (opcode == tflite::BuiltinOperator_PAD)
    {
        CHECK_VALID_SIZE(inputs.size(), 2);

        // Quantized PAD: pad with the zero-point (real-value zero).
        if (inputTensorInfo.IsQuantized())
        {
            desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
        }
    }
    else if (opcode == tflite::BuiltinOperator_PADV2)
    {
        CHECK_VALID_SIZE(inputs.size(), 3);

        armnn::TensorInfo padValueTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        // Arm NN's PadDescriptor carries a single scalar pad value.
        if (padValueTensorInfo.GetNumElements() != 1)
        {
            ARMNN_THROW_PARSE_EXCEPTION("Multiple padding values are not supported in PADV2");
        }
        BufferRawPtr padValueBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

        // Get the pad value from the input tensor
        if (padValueBufferPtr->data.size() > 0)
        {
            // Dequantize where needed so m_PadValue is always a real value.
            switch (padValueTensorInfo.GetDataType())
            {
                case armnn::DataType::Float32:
                {
                    std::vector<float> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = padValueBuffer[0];
                    break;
                }
                case armnn::DataType::QAsymmU8:
                {
                    std::vector<uint8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = armnn::Dequantize<uint8_t>(padValueBuffer[0],
                                                                 padValueTensorInfo.GetQuantizationScale(),
                                                                 padValueTensorInfo.GetQuantizationOffset());
                    break;
                }
                case armnn::DataType::QAsymmS8:
                case armnn::DataType::QSymmS8:
                {
                    std::vector<int8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = armnn::Dequantize<int8_t>(padValueBuffer[0],
                                                                padValueTensorInfo.GetQuantizationScale(),
                                                                padValueTensorInfo.GetQuantizationOffset());
                    break;
                }
                default: ARMNN_THROW_PARSE_EXCEPTION("Unsupported DataType");
            }
        }
        else if (inputTensorInfo.IsQuantized())
        {
            // No pad-value data supplied: fall back to the zero-point.
            desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
        }
    }

    // Unpack the flat paddings buffer into (before, after) pairs.
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    auto layerName = (opcode == tflite::BuiltinOperator_PAD) ? fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex)
            : fmt::format("PadV2:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2896
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +01002897void TfLiteParserImpl::ParseMirrorPad(size_t subgraphIndex, size_t operatorIndex)
2898{
2899 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2900
2901 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2902 CHECK_VALID_SIZE(inputs.size(), 2);
2903
2904 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2905 CHECK_VALID_SIZE(outputs.size(), 1);
2906
Mike Kelly377fb212023-01-10 15:55:28 +00002907 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +01002908
Mike Kelly377fb212023-01-10 15:55:28 +00002909 armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +01002910 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2911
2912 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
2913 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
2914
2915 size_t step = 2;
2916 armnn::PadDescriptor desc;
2917 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
2918 {
2919 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
2920 }
2921
2922 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2923 const auto* options = operatorPtr->builtin_options.AsMirrorPadOptions();
2924
2925 if (options->mode == tflite::MirrorPadMode_REFLECT)
2926 {
2927 desc.m_PaddingMode = PaddingMode::Reflect;
2928 }
2929 else if (options->mode == tflite::MirrorPadMode_SYMMETRIC)
2930 {
2931 desc.m_PaddingMode = PaddingMode::Symmetric;
2932 }
2933 else
2934 {
2935 ARMNN_THROW_PARSE_EXCEPTION("PaddingMode must be either REFLECT or SYMMETRIC");
2936 }
2937
2938 // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
2939 // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
2940 auto inputShape = inputTensorInfo.GetShape();
2941 auto padList = desc.m_PadList;
2942
2943 const unsigned int isReflect = static_cast<unsigned int>(desc.m_PaddingMode == PaddingMode::Reflect);
2944 for(unsigned int i = 0; i < padList.size(); ++i)
2945 {
2946 if(padList.at(i).first > (inputShape[i] - isReflect) ||
2947 padList.at(i).second > (inputShape[i] - isReflect))
2948 {
2949 ARMNN_THROW_PARSE_EXCEPTION("Padding values must be less (Reflect) or "
2950 "equal (Symmetric) to the dimension size.");
2951 }
2952 }
2953
2954 auto layerName = fmt::format("MirrorPad:{}:{}", subgraphIndex, operatorIndex);
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +01002955
2956 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01002957
2958 if (!layer)
2959 {
2960 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
2961 operatorIndex, CHECK_LOCATION().AsString()));
2962 }
2963
Mike Kelly377fb212023-01-10 15:55:28 +00002964 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +01002965 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2966
2967 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2968 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2969
2970 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2971 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2972}
2973
/// Parses a TfLite PRELU operator and adds a Prelu layer to the network.
/// Input 0 is the data tensor, input 1 the alpha (slope) tensor; a constant
/// alpha is materialised as a Constant layer feeding the Prelu's second input.
/// @throws NullPointerException if layer creation fails.
void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo alphaTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    if (IsConstTensor(inputs[1]))
    {
        // Constant alpha: wire only input 0 to the graph, and feed input 1
        // from a dedicated Constant layer built from the model buffer.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
        RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);

        // Alpha data is converted to the data type of input 0.
        auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo,
                                                               inputTensorInfo.GetDataType());
        std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
        IConnectableLayer* constLayer =
                    m_Network->AddConstantLayer(alphaTensorAndData.first, constLayerName.c_str());

        if (!constLayer)
        {
            throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                                   operatorIndex, CHECK_LOCATION().AsString()));
        }

        constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
        constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
        // VIRTUAL_OPERATOR_ID: the constant layer has no TfLite operator of its own.
        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            constLayer,
                            { inputTensorIndexes[1] });
    }
    else
    {
        // Runtime alpha: both inputs come from the graph.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
    }

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
3035
Kevin May7d96b162021-02-03 17:38:41 +00003036void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00003037{
3038 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3039
3040 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3041 CHECK_VALID_SIZE(inputs.size(), 1);
3042
3043 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3044 CHECK_VALID_SIZE(outputs.size(), 1);
3045
James Ward58dec6b2020-09-11 17:32:44 +01003046 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00003047
3048 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01003049
3050 if (!layer)
3051 {
3052 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3053 operatorIndex, CHECK_LOCATION().AsString()));
3054 }
Sadik Armagan66dedc72019-12-10 16:32:07 +00003055
Mike Kelly377fb212023-01-10 15:55:28 +00003056 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan66dedc72019-12-10 16:32:07 +00003057 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3058
3059 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3060 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3061
3062 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3063 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3064}
Finn Williamsc42c3842019-01-22 14:18:11 +00003065
Kevin May7d96b162021-02-03 17:38:41 +00003066void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01003067{
Finn Williamsc42c3842019-01-22 14:18:11 +00003068 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01003069}
3070
Kevin May7d96b162021-02-03 17:38:41 +00003071void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01003072{
Finn Williamsc42c3842019-01-22 14:18:11 +00003073 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
3074}
Sadik Armagan58f39192018-09-17 14:14:39 +01003075
Kevin May7d96b162021-02-03 17:38:41 +00003076void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan12239e72020-05-27 11:06:17 +01003077{
Jan Eilers2f746b32020-07-28 14:00:06 +01003078 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
Sadik Armagan12239e72020-05-27 11:06:17 +01003079}
3080
Kevin May7d96b162021-02-03 17:38:41 +00003081void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsc42c3842019-01-22 14:18:11 +00003082{
3083 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
3084}
3085
Kevin May7d96b162021-02-03 17:38:41 +00003086void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd99851762019-04-09 09:37:38 +01003087{
3088 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
3089}
3090
Kevin May7d96b162021-02-03 17:38:41 +00003091void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
Matthew Sloyan7515d072020-12-16 12:50:01 +00003092{
3093 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
3094}
3095
Kevin May7d96b162021-02-03 17:38:41 +00003096void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
Jan Eilers2f746b32020-07-28 14:00:06 +01003097{
3098 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
3099}
Finn Williamsc42c3842019-01-22 14:18:11 +00003100
Kevin May7d96b162021-02-03 17:38:41 +00003101void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
Finn Williamsc42c3842019-01-22 14:18:11 +00003102{
3103 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00003104 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Jan Eilers8eb25602020-03-09 12:13:48 +00003105 IgnoreUnused(operatorPtr);
Sadik Armagan58f39192018-09-17 14:14:39 +01003106
3107 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3108 CHECK_VALID_SIZE(inputs.size(), 1);
3109
3110 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3111 CHECK_VALID_SIZE(outputs.size(), 1);
3112
James Ward58dec6b2020-09-11 17:32:44 +01003113 auto layerName = fmt::format("Activation:");
Sadik Armagan58f39192018-09-17 14:14:39 +01003114 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00003115 activationDesc.m_Function = activationType;
3116
3117 switch (activationType)
3118 {
3119 case ActivationFunction::ReLu:
3120 {
James Ward58dec6b2020-09-11 17:32:44 +01003121 layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00003122 break;
3123 }
3124 case ActivationFunction::BoundedReLu:
3125 {
James Ward58dec6b2020-09-11 17:32:44 +01003126 layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00003127 activationDesc.m_A = 6.0f;
3128 activationDesc.m_B = 0.0f;
3129 break;
3130 }
3131 case ActivationFunction::Sigmoid:
3132 {
James Ward58dec6b2020-09-11 17:32:44 +01003133 layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00003134 break;
3135 }
Nina Drozd99851762019-04-09 09:37:38 +01003136 case ActivationFunction::TanH:
3137 {
James Ward58dec6b2020-09-11 17:32:44 +01003138 layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd99851762019-04-09 09:37:38 +01003139 activationDesc.m_A = 1.0f;
3140 activationDesc.m_B = 1.0f;
3141 break;
3142 }
Sadik Armagan12239e72020-05-27 11:06:17 +01003143 case ActivationFunction::LeakyReLu:
3144 {
James Ward58dec6b2020-09-11 17:32:44 +01003145 layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00003146 const auto* options = operatorPtr->builtin_options.AsLeakyReluOptions();
Sadik Armagan12239e72020-05-27 11:06:17 +01003147 activationDesc.m_A = options->alpha;
3148 break;
3149 }
Matthew Sloyan7515d072020-12-16 12:50:01 +00003150 case ActivationFunction::Elu:
3151 {
3152 layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
3153 activationDesc.m_A = 1.0f;
3154 break;
3155 }
Jan Eilers2f746b32020-07-28 14:00:06 +01003156 case ActivationFunction::HardSwish:
Matthew Sloyan7515d072020-12-16 12:50:01 +00003157 {
James Ward58dec6b2020-09-11 17:32:44 +01003158 layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
Jan Eilers2f746b32020-07-28 14:00:06 +01003159 break;
Matthew Sloyan7515d072020-12-16 12:50:01 +00003160 }
Finn Williamsc42c3842019-01-22 14:18:11 +00003161 default:
3162 {
3163 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003164 fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
3165 static_cast<int>(activationType), CHECK_LOCATION().AsString()));
Finn Williamsc42c3842019-01-22 14:18:11 +00003166 }
3167 }
3168
3169 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01003170
Mike Kelly377fb212023-01-10 15:55:28 +00003171 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan58f39192018-09-17 14:14:39 +01003172 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3173
3174 // register the input connection slots for the layer, connections are made after all layers have been created
3175 // only the tensors for the inputs are relevant, exclude the const tensors
3176 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3177 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3178
3179 // register the output connection slots for the layer, connections are made after all layers have been created
3180 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3181 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3182}
Mike Kelly0d77ae12022-01-07 17:42:27 +00003183armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo& inputTensorInfo,
3184 const std::vector<int32_t>& targetDimsIn)
Sadikb94967b2018-09-19 15:30:00 +01003185{
3186 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
3187 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
3188
3189 if (stretchDim != targetDimsIn.end())
3190 {
3191 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
3192 {
3193 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003194 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01003195 }
3196
3197 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003198 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01003199 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
3200
3201 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
3202 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
3203 }
3204
3205 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
3206
3207 TensorInfo reshapeInfo = inputTensorInfo;
3208 reshapeInfo.SetShape(outputShape);
3209
3210 return reshapeInfo;
3211}
3212
/// Parses a TFLite RESHAPE operator.
/// The target shape may arrive via the operator's built-in `new_shape` option
/// or via a constant second input tensor; when the shape input's buffer is
/// empty (dynamic), a best-effort inference is attempted at parse time.
/// Throws ParseException when no usable target shape can be determined or the
/// resolved shape disagrees with the model's declared output shape.
void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsReshapeOptions();
    auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");

    // Extracting new shape for the output
    // There are two ways it can be passed
    //  * First is to define the target shape in the operator built-in options
    //  * Second is to pass it as a second input tensor
    std::vector<int32_t> targetShape;
    bool targetShapeFound = false;
    // Check if built-in options were given
    if (options != nullptr)
    {
        // make sure the parameter is given
        if (options->new_shape.empty() == false)
        {
            targetShape = options->new_shape;
            targetShapeFound = true;
        }
    }

    // If there is no built-in option given or if the built-in new_shape parameter was empty
    if (!targetShapeFound)
    {
        // Check for a second input tensor
        if (inputs.size() > 1 && inputs[1] != nullptr)
        {
            if (inputs[1]->is_variable)
            {
                ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
            }

            if (inputs[1]->shape.size() != 1)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
            }

            if (inputs[1]->type != tflite::TensorType_INT32)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
            }

            // Extract target shape from input
            auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
            auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
            if (values)
            {
                // Constant shape input: read the target dims directly.
                for (int i = 0; i < inputs[1]->shape[0]; ++i)
                {
                    targetShape.push_back(values[i]);
                }
            }
            else
            {
                // Shape input has no constant data; fall back to inference.
                try
                {
                    // We attempt to infer during Runtime.
                    TensorShape reshapeShapes = ToTensorInfo(inputs[1]).GetShape();

                    // If the shape input has as many entries as the declared
                    // output has dimensions, trust the declared output shape.
                    if (reshapeShapes[0] == actualOutputTensorInfo.GetNumDimensions())
                    {
                        for (unsigned int i = 0; i < actualOutputTensorInfo.GetShape().GetNumDimensions(); ++i)
                        {
                            targetShape.push_back(actualOutputTensorInfo.GetShape()[i]);
                        }
                    }
                    // The parser only supports shape (batch, -1) or (-1) for non-constant shape input.
                    else if (reshapeShapes[0] > 2)
                    {
                        throw ParseException(fmt::format("Invalid input shape '{}' in Reshape layer '{}' {}. "
                                                         "When inferring during runtime, the parser only supports "
                                                         "shape (batch, -1) or (-1) for target shape input.",
                                                         reshapeShapes[0],
                                                         layerName,
                                                         CHECK_LOCATION().AsString()));
                    }
                    else
                    {
                        // Derive (-1) as flatten, or (batch, -1) from the input's
                        // element count and leading dimension.
                        const int32_t numInputElements = inputTensorInfo.GetNumElements();
                        const int32_t inputTensorShape = inputTensorInfo.GetShape()[0];
                        if (reshapeShapes[0] == 1)
                        {
                            targetShape = {numInputElements};
                        }
                        else if (reshapeShapes[0] == 2)
                        {
                            targetShape = {inputTensorShape, numInputElements / inputTensorShape};
                        }
                    }
                }
                catch (const std::exception& exc)
                {
                    ARMNN_THROW_PARSE_EXCEPTION("Failed attempt to infer during runtime the target shape input for "
                                                "Reshape operation. Reshape operator target shape input buffer data "
                                                "is null. " << exc.what());
                }
            }
        }
        else
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }
    }

    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    // The output shape can be provided to us in 2 ways:
    // 1. through the normal 'shape' parameter given by outputs[indx]->shape
    // 2. through additional parameter 'shape_signature' given by outputs[indx]->buffer.
    // This parameter can sometimes contain -1 value not visible in the 'shape' parameter.
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        // Attempt to extract output shape from secondary 'shape_signature'
        // parameter and try to CheckShape() with this param.
        std::vector<int32_t> secondaryOutputTargetShape = outputs[0]->shape_signature;

        // if outputs[0]->shape_signature contain a -1 value, we need to compute its actual value
        // from reshape input in order to correctly verify reshape parameters equal output shape
        armnn::TensorInfo secondaryReshapeOutputTensorInfo =
            TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, secondaryOutputTargetShape);

        if (!CheckShape(reshapeOutputTensorShape, secondaryReshapeOutputTensorInfo.GetShape()))
        {
            std::stringstream ss;
            ss << "New shape defined in reshape parameters "
               << reshapeOutputTensorShape
               << " does not equal output shape "
               << actualOutputTensorInfo.GetShape()
               << ": "
               << CHECK_LOCATION().AsString();
            throw ParseException(ss.str());
        }
    }
    auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
    // Cache the resolved output info so downstream operators see the real shape.
    m_TensorInfos[outputTensorIds[0]] = reshapeOutputTensorInfo;

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3385
Kevin May7d96b162021-02-03 17:38:41 +00003386void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003387{
Sadik Armagana3b31f02019-12-05 09:08:53 +00003388 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
3389}
3390
Kevin May7d96b162021-02-03 17:38:41 +00003391void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagana3b31f02019-12-05 09:08:53 +00003392{
3393 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
3394}
3395
Kevin May7d96b162021-02-03 17:38:41 +00003396void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
Sadik Armagana3b31f02019-12-05 09:08:53 +00003397{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003398 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3399
3400 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3401 CHECK_VALID_SIZE(inputs.size(), 2);
3402
3403 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3404 CHECK_VALID_SIZE(outputs.size(), 1);
3405
Mike Kelly377fb212023-01-10 15:55:28 +00003406 armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003407
3408 // Data for the parsed tensor args (size) must be stored locally.
3409 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
3410
3411 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3412 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
3413
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01003414 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00003415 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003416 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01003417 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
3418 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003419
James Ward58dec6b2020-09-11 17:32:44 +01003420 auto layerName = fmt::format("Resize:");
Sadik Armagana3b31f02019-12-05 09:08:53 +00003421
3422 switch (resizeMethod)
3423 {
3424 case ResizeMethod::Bilinear:
3425 {
James Ward58dec6b2020-09-11 17:32:44 +01003426 layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00003427
3428 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3429 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
3430
David Monahan4a0c9b92020-05-30 09:48:39 +01003431 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00003432 break;
3433 }
3434 case ResizeMethod::NearestNeighbor:
3435 {
James Ward58dec6b2020-09-11 17:32:44 +01003436 layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagana3b31f02019-12-05 09:08:53 +00003437 break;
3438 }
3439 default:
3440 {
3441 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003442 fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
3443 static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
Sadik Armagana3b31f02019-12-05 09:08:53 +00003444 }
3445 }
3446
Mike Kelly377fb212023-01-10 15:55:28 +00003447 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
James Conroy05102392020-06-24 15:39:55 +01003448
3449 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01003450
3451 if (!layer)
3452 {
3453 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3454 operatorIndex, CHECK_LOCATION().AsString()));
3455 }
3456
Mike Kelly377fb212023-01-10 15:55:28 +00003457 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
3458 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003459 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3460
3461 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3462 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3463
3464 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3465 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3466}
3467
Tianle Chenge5a30ff2023-07-03 11:24:12 +01003468void TfLiteParserImpl::ParseReverseV2(size_t subgraphIndex, size_t operatorIndex)
3469{
3470 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3471
3472 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3473 CHECK_VALID_SIZE(inputs.size(), 2);
3474
3475 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3476 CHECK_VALID_SIZE(outputs.size(), 1);
3477
3478 auto layerName = fmt::format("ReverseV2:{}:{}", subgraphIndex, operatorIndex);
3479
3480 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3481 TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
3482 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3483
Tracy Narinebb8d7592023-07-13 16:50:54 +01003484 IConnectableLayer* layer = m_Network->AddReverseV2Layer(layerName.c_str());
Tianle Chenge5a30ff2023-07-03 11:24:12 +01003485 ARMNN_ASSERT(layer != nullptr);
3486
3487 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3488
3489 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Tracy Narinebb8d7592023-07-13 16:50:54 +01003490 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Tianle Chenge5a30ff2023-07-03 11:24:12 +01003491
3492 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3493 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3494}
3495
Teresa Charlin777008b2023-07-26 10:07:55 +01003496void TfLiteParserImpl::ParseTile(size_t subgraphIndex, size_t operatorIndex)
3497{
3498 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3499
3500 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3501 CHECK_VALID_SIZE(inputs.size(), 2);
3502
3503 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3504 CHECK_VALID_SIZE(outputs.size(), 1);
3505
3506 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3507 TensorInfo multiplesTensorInfo = ToTensorInfo(inputs[1]);
3508 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3509
3510 auto layerName = fmt::format("Tile:{}:{}", subgraphIndex, operatorIndex);
3511
3512 TileDescriptor descriptor;
3513
3514 BufferRawPtr multiplesBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3515 if (multiplesBufferPtr != nullptr)
3516 {
3517 std::vector<int32_t> multiplesData(multiplesTensorInfo.GetNumElements());
3518 ::memcpy(multiplesData.data(), multiplesBufferPtr->data.data(), multiplesTensorInfo.GetNumBytes());
3519 descriptor.m_Multiples.assign(multiplesData.begin(), multiplesData.end());
3520 }
3521 else
3522 {
3523 ARMNN_THROW_PARSE_EXCEPTION("For Tile layer, Multiples data was not found in the buffer.");
3524 }
3525
3526 IConnectableLayer* layer = m_Network->AddTileLayer(descriptor, layerName.c_str());
3527 ARMNN_ASSERT(layer != nullptr);
3528
3529 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3530
3531 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3532 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3533
3534 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3535 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3536}
3537
/// Parses a TFLite CONCATENATION operator (N inputs, 1 output).
/// Builds an OriginsDescriptor describing where each input view starts along
/// the (possibly negative, hence the modulo normalisation) concat axis, then
/// appends any fused activation as a separate layer.
void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsConcatenationOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);

    CHECK_VALID_SIZE(outputs.size(), 1);

    unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
    uint32_t inputRank = InputTensorInfo(subgraphIndex, operatorIndex, 0).GetNumDimensions();

    // Normalise a negative axis into [0, inputRank).
    const unsigned int concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);
    // Running offset along the concat axis; advanced per view below.
    unsigned int mergeDimOrigin = 0;

    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, viewIndex);

        // This set up concatDescriptor view origin
        armnnUtils::ProcessConcatInputTensorInfo(
            inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output shape is inferred from all inputs (empty index list = all).
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});

    // add fused activation layer; output slots are then registered on the
    // activation layer (or the concat layer if no activation is fused).
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3594
/// Parses a TfLite FULLY_CONNECTED operator and adds an ArmNN FullyConnected
/// layer to the network, together with:
///  - an optional leading Reshape layer that flattens >2D inputs to 2D,
///  - an optional trailing Reshape ("ExpandDims") that restores >2D outputs,
///  - an optional fused activation layer.
/// Operator inputs: 0 = data, 1 = weights (must be 2D), 2 = optional bias.
/// Exactly one output is expected.
/// @throws ParseException if the weights are not 2D or a flattened shape
///         cannot be deduced from the filter size.
/// @throws NullPointerException if layer creation fails.
void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = false;
    // TfLite FC weights use the transposed layout relative to ArmNN's default,
    // so the transposed-weights path is always selected here.
    desc.m_TransposeWeightMatrix = true;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Fully Connected Layer accepts two dimensional weights input
    int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
    if (weightsDimension != 2)
    {
        throw ParseException(
            fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
                        "Node {}",
                        weightsDimension,
                        CHECK_LOCATION().AsString()));
    }

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input tensor to the registration list
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    desc.m_ConstantWeights = IsConstTensor(inputs[1]);

    // Add the weights input to the registration list, constant layers will be added by SetupConstantLayers if constant.
    tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);

    // Queue constant weights for dequantization if their data type must be
    // converted to match the input's data type (e.g. quantized weights with a
    // float input).
    if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
    {
        m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
    }

    // A third input, when present, is the bias.
    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        // Add the biases input to the registration list, constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);

        if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
        {
            m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
        }
    }

    // Filters and biases are always passed to fully connected as inputs
    layer = m_Network->AddFullyConnectedLayer(desc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    unsigned int startingSlotIndex = 0;
    if (inputTensorInfo.GetNumDimensions() > 2)
    {
        // Add reshape to flatten to 2D [batch_size, input_size],
        // where "input_size" corresponds to the number of inputs to the layer,
        // matching the second dimension of weights,
        // and "batch_size" is calculated by dividing the number of elements by "input_size".
        std::vector<unsigned int> reshapedDimensions(2);
        reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
        reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];

        if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
        {
            throw ParseException(
                fmt::format("Failed to deduce input tensor shape from filter size {} {}",
                            reshapedDimensions[1],
                            CHECK_LOCATION().AsString()));
        }

        // Re-fetch the input info so quantization parameters are preserved,
        // then override only the shape.
        armnn::TensorInfo reshapedTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
        reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
        inputTensorInfo = reshapedTensorInfo;

        std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor,
                                                                           reshapeLayerName.c_str());

        // Splice the reshape between the graph input and the FC layer.
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
        reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

        RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
        // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
        tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
        startingSlotIndex = 1;
    }

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);

    // Infer the output shape from the (possibly flattened) input shape and the
    // filter shape.
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromShapes(subgraphIndex, operatorIndex, layer, 0,
                                                                    { inputTensorInfo.GetShape(),
                                                                      filterTensorInfo.GetShape() });

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    if (outputTensorInfo.GetNumDimensions() > 2)
    {
        // Calculate reshape to flatten to 2D [batch_size, input_size]
        std::vector<unsigned int> reshapedDimensions(2);
        reshapedDimensions[1] = filterTensorInfo.GetShape()[0];
        reshapedDimensions[0] = outputTensorInfo.GetNumElements() / reshapedDimensions[1];
        armnn::TensorInfo reshapedOutputTensorInfo = outputTensorInfo;
        if (outputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
        {
            throw ParseException(
                fmt::format("Failed to deduce output tensor shape from filter size {} {}",
                            reshapedDimensions[1],
                            CHECK_LOCATION().AsString()));
        }
        reshapedOutputTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
        layer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);

        // Append a reshape that restores the original >2D output shape; from
        // here on "layer" refers to that reshape layer.
        std::string reshapeLayerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
        layer = AddReshapeLayer(layer, 0, reshapeLayerName, outputTensorInfo);
    }

    // we need to add the activation layer and fortunately we don't need to care about the data layout
    armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
                                                                             options->fused_activation_function);

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});

    // Cache the pre-activation output info keyed by the output tensor index so
    // downstream operators can use it for shape inference.
    m_TensorInfos[outputTensorIndexes[0]] = layer->GetOutputSlot(0).GetTensorInfo();
}
3743
Kevin May7d96b162021-02-03 17:38:41 +00003744void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
keidav011b3e2ea2019-02-21 10:07:37 +00003745{
3746 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3747
Mike Kelly0d77ae12022-01-07 17:42:27 +00003748 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
keidav011b3e2ea2019-02-21 10:07:37 +00003749
3750 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3751 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3752 CHECK_VALID_SIZE(outputs.size(), 4);
3753
3754 // Obtain custom options from flexbuffers
3755 auto custom_options = operatorPtr->custom_options;
3756 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
3757
3758 // Obtain descriptor information from tf lite
3759 DetectionPostProcessDescriptor desc;
3760 desc.m_MaxDetections = m["max_detections"].AsUInt32();
3761 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
3762 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
3763 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
3764 desc.m_NumClasses = m["num_classes"].AsUInt32();
3765 desc.m_ScaleH = m["h_scale"].AsFloat();
3766 desc.m_ScaleW = m["w_scale"].AsFloat();
3767 desc.m_ScaleX = m["x_scale"].AsFloat();
3768 desc.m_ScaleY = m["y_scale"].AsFloat();
3769
keidav0107d58c72019-02-26 11:57:39 +00003770 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00003771 {
keidav0107d58c72019-02-26 11:57:39 +00003772 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00003773 }
3774 if (!(m["detections_per_class"].IsNull()))
3775 {
3776 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
3777 }
3778
3779 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
3780 {
3781 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
3782 "must be positive and less than or equal to 1.");
3783 }
3784
Mike Kelly377fb212023-01-10 15:55:28 +00003785 armnn::TensorInfo anchorTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003786 auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);
keidav011b3e2ea2019-02-21 10:07:37 +00003787
James Ward58dec6b2020-09-11 17:32:44 +01003788 auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003789 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
keidav011b3e2ea2019-02-21 10:07:37 +00003790 layerName.c_str());
3791
Ryan OSheac229b3f2023-06-27 22:34:54 +01003792 if (!layer)
3793 {
3794 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3795 operatorIndex, CHECK_LOCATION().AsString()));
3796 }
keidav011b3e2ea2019-02-21 10:07:37 +00003797
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003798 // The model does not specify the output shapes.
3799 // The output shapes are calculated from the max_detection and max_classes_per_detection.
3800 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
Mike Kelly377fb212023-01-10 15:55:28 +00003801 m_OverriddenOutputShapes.push_back({ 1, numDetectedBox, 4 });
3802 m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
3803 m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
3804 m_OverriddenOutputShapes.push_back({ 1 });
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003805
keidav011b3e2ea2019-02-21 10:07:37 +00003806 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
3807 {
Mike Kelly377fb212023-01-10 15:55:28 +00003808 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverriddenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00003809 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
3810 }
3811
3812 // Register the input connection slots for the layer, connections are made after all layers have been created
3813 // only the tensors for the inputs are relevant, exclude the const tensors
3814 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3815 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
3816
3817 // Register the output connection slots for the layer, connections are made after all layers have been created
3818 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3819 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
3820 outputTensorIndexes[1],
3821 outputTensorIndexes[2],
3822 outputTensorIndexes[3]});
3823}
3824
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003825/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00003826void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003827{
3828 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3829
3830 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3831 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3832 CHECK_VALID_SIZE(outputs.size(), 1);
3833
3834 if (inputs.size() < 1)
3835 {
3836 throw ParseException("Pack must have at least one input.");
3837 }
3838
3839 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3840 const auto* options = operatorPtr->builtin_options.AsPackOptions();
3841
3842 StackDescriptor desc;
3843 desc.m_Axis = static_cast<uint32_t>(options->axis);
3844 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
3845
3846 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00003847 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003848 desc.m_InputShape = inputTensorInfo.GetShape();
3849
James Ward58dec6b2020-09-11 17:32:44 +01003850 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003851 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
3852
Ryan OSheac229b3f2023-06-27 22:34:54 +01003853 if (!layer)
3854 {
3855 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
3856 operatorIndex, CHECK_LOCATION().AsString()));
3857 }
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003858
Mike Kelly377fb212023-01-10 15:55:28 +00003859 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003860 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3861
3862 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3863 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
3864
3865 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3866 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3867}
3868
Mike Kelly5880b912022-01-28 16:18:54 +00003869void TfLiteParserImpl::ParseUnidirectionalSequenceLSTM(size_t subgraphIndex, size_t operatorIndex)
3870{
3871 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3872
3873 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3874 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3875
3876 if (inputs.size() < 2)
3877 {
3878 throw ParseException("UnidirectionalSequenceLSTM must have at least 2 input.");
3879 }
3880
3881 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3882 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
3883 const auto nodeParams = operatorPtr->builtin_options.AsUnidirectionalSequenceLSTMOptions();
3884 CHECK_SUPPORTED_FUSED_ACTIVATION(nodeParams, subgraphIndex, operatorIndex);
Mike Kelly377fb212023-01-10 15:55:28 +00003885 auto inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly5880b912022-01-28 16:18:54 +00003886 auto outputTensorInfo = ToTensorInfo(outputs[0]);
3887
3888 // Set the params structure for the AddUnidirectionalSequenceLstmLayer call
3889 // Please refer to each operand at
3890 // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop
3891 armnn::LstmInputParams params;
3892
3893 if (IsOptionalOperandPresent(operatorPtr->inputs[1]))
3894 {
3895 params.m_InputToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[1]].get(),
3896 inputTensorInfo).first;
3897 }
3898
3899 params.m_InputToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[2]].get(),
3900 inputTensorInfo).first;
3901 params.m_InputToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[3]].get(),
3902 inputTensorInfo).first;
3903 params.m_InputToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[4]].get(),
3904 inputTensorInfo).first;
3905
3906 // Recurrent weight tensors of size {n_cell, n_output}
3907 if (IsOptionalOperandPresent(operatorPtr->inputs[5]))
3908 {
3909 params.m_RecurrentToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[5]].get(),
3910 inputTensorInfo).first;
3911 }
3912
3913 params.m_RecurrentToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[6]].get(),
3914 inputTensorInfo).first;
3915 params.m_RecurrentToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[7]].get(),
3916 inputTensorInfo).first;
3917 params.m_RecurrentToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[8]].get(),
3918 inputTensorInfo).first;
3919
3920 // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
3921 if (IsOptionalOperandPresent(operatorPtr->inputs[9]))
3922 {
3923 params.m_CellToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[9]].get(),
3924 inputTensorInfo).first;
3925 }
3926
3927 if (IsOptionalOperandPresent(operatorPtr->inputs[10]))
3928 {
3929 params.m_CellToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[10]].get(),
3930 inputTensorInfo).first;
3931 }
3932
3933 if (IsOptionalOperandPresent(operatorPtr->inputs[11]))
3934 {
3935 params.m_CellToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[11]].get(),
3936 inputTensorInfo).first;
3937 }
3938
3939 // Gates bias tensors of size {n_cell}
3940 if (IsOptionalOperandPresent(operatorPtr->inputs[12]))
3941 {
3942 params.m_InputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[12]].get(),
3943 inputTensorInfo).first;
3944 }
3945
3946 params.m_ForgetGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[13]].get(),
3947 inputTensorInfo).first;
3948 params.m_CellBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[14]].get(),
3949 inputTensorInfo).first;
3950 params.m_OutputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[15]].get(),
3951 inputTensorInfo).first;
3952
3953 // Projection weight tensor of size {n_output, n_cell}
3954 if (IsOptionalOperandPresent(operatorPtr->inputs[16]))
3955 {
3956 params.m_ProjectionWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[16]].get(),
3957 inputTensorInfo).first;
3958 }
3959 // Projection bias tensor of size {n_output}
3960 if (IsOptionalOperandPresent(operatorPtr->inputs[17]))
3961 {
3962 params.m_ProjectionBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[17]].get(),
3963 inputTensorInfo).first;
3964 }
3965
3966 // These state tensors are defined as variable tensors, and will be modified by this op.
3967 armnn::TensorInfo outputStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[18]].get());
3968 m_ConstantsToBeCreated.push_back(operatorPtr->inputs[18]);
3969 armnn::TensorInfo cellStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[19]].get());
3970 m_ConstantsToBeCreated.push_back(operatorPtr->inputs[19]);
3971
3972 // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
3973 if (inputs.size() >= 21 && IsOptionalOperandPresent(operatorPtr->inputs[20]))
3974 {
3975 params.m_InputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[20]].get(),
3976 inputTensorInfo).first;
3977 }
3978
3979 if (inputs.size() >= 22 && IsOptionalOperandPresent(operatorPtr->inputs[21]))
3980 {
3981 params.m_ForgetLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[21]].get(),
3982 inputTensorInfo).first;
3983 }
3984
3985 if (inputs.size() >= 23 && IsOptionalOperandPresent(operatorPtr->inputs[22]))
3986 {
3987 params.m_CellLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[22]].get(),
3988 inputTensorInfo).first;
3989 }
3990
3991 if (inputs.size() >= 24 && IsOptionalOperandPresent(operatorPtr->inputs[23]))
3992 {
3993 params.m_OutputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[23]].get(),
3994 inputTensorInfo).first;
3995 }
3996
3997 // set the layer descriptor
3998 armnn::UnidirectionalSequenceLstmDescriptor desc;
3999 desc.m_ActivationFunc = nodeParams->fused_activation_function;
4000 desc.m_ClippingThresCell = nodeParams->cell_clip;
4001 desc.m_ClippingThresProj = nodeParams->proj_clip;
4002 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
4003 || params.m_RecurrentToInputWeights == nullptr
4004 || params.m_InputGateBias == nullptr);
4005 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
4006 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
4007 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
4008 || params.m_ForgetLayerNormWeights != nullptr
4009 || params.m_CellLayerNormWeights != nullptr
4010 || params.m_OutputLayerNormWeights != nullptr);
4011 desc.m_TimeMajor = nodeParams->time_major;
4012
Mike Kellyc0800a32022-06-15 10:57:52 +01004013 if (operatorPtr->intermediates.size() > 3 && desc.m_LayerNormEnabled)
Mike Kelly5880b912022-01-28 16:18:54 +00004014 {
4015 auto inputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[0]].get(),
4016 inputTensorInfo).first;
4017 auto inputIntermediateTensorInfo = inputIntermediate->GetInfo();
4018 desc.m_InputIntermediateScale = inputIntermediateTensorInfo.GetQuantizationScale();
4019
4020 auto forgetIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[1]].get(),
4021 inputTensorInfo).first;
4022 auto forgetIntermediateTensorInfo = forgetIntermediate->GetInfo();
4023 desc.m_ForgetIntermediateScale = forgetIntermediateTensorInfo.GetQuantizationScale();
4024
4025 auto cellIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[2]].get(),
4026 inputTensorInfo).first;
4027 auto cellIntermediateTensorInfo = cellIntermediate->GetInfo();
4028 desc.m_CellIntermediateScale = cellIntermediateTensorInfo.GetQuantizationScale();
4029
4030 auto outputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[3]].get(),
4031 inputTensorInfo).first;
4032 auto outputIntermediateTensorInfo = outputIntermediate->GetInfo();
4033 desc.m_OutputIntermediateScale = outputIntermediateTensorInfo.GetQuantizationScale();
4034 }
4035 else
4036 {
4037 float defaultIntermediate = std::pow(2, -12);
4038 desc.m_InputIntermediateScale = defaultIntermediate;
4039 desc.m_ForgetIntermediateScale = defaultIntermediate;
4040 desc.m_CellIntermediateScale = defaultIntermediate;
4041 desc.m_OutputIntermediateScale = defaultIntermediate;
4042 }
4043
Mike Kellyc0800a32022-06-15 10:57:52 +01004044 if (operatorPtr->intermediates.size() > 4)
4045 {
4046 auto hiddentensor = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[4]].get(),
4047 inputTensorInfo).first;
Mike Kelly5880b912022-01-28 16:18:54 +00004048
Mike Kellyc0800a32022-06-15 10:57:52 +01004049 desc.m_HiddenStateScale = hiddentensor->GetInfo().GetQuantizationScale();
4050 desc.m_HiddenStateZeroPoint = hiddentensor->GetInfo().GetQuantizationOffset();
4051 }
Mike Kelly5880b912022-01-28 16:18:54 +00004052 unsigned int batchSize = inputTensorInfo.GetShape()[0];
4053 unsigned int outputSize = outputTensorInfo.GetShape()[2];
4054 unsigned int numUnits = cellStateInInfo.GetShape()[1];
4055
4056 armnn::DataType dataType = inputTensorInfo.GetDataType();
4057 float qScale = inputTensorInfo.GetQuantizationScale();
4058 float qOffset = inputTensorInfo.GetQuantizationOffset();
4059
4060 armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
4061 if (!desc.m_CifgEnabled)
4062 {
4063 scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
4064 }
4065 armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits},
4066 cellStateInInfo.GetDataType(),
4067 cellStateInInfo.GetQuantizationScale(),
4068 cellStateInInfo.GetQuantizationOffset());
4069 armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
4070
4071 armnn::LstmInputParamsInfo paramsInfo;
4072 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4073 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4074 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4075 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4076 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4077 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4078 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4079 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4080 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4081
4082 if (!desc.m_CifgEnabled)
4083 {
4084 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4085 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4086 if (params.m_CellToInputWeights != nullptr)
4087 {
4088 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
4089 }
4090 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4091 }
4092
4093 if (desc.m_ProjectionEnabled)
4094 {
4095 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
4096 if (params.m_ProjectionBias != nullptr)
4097 {
4098 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
4099 }
4100 }
4101
4102 if (desc.m_PeepholeEnabled)
4103 {
4104 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
4105 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
4106 }
4107
4108 if (desc.m_LayerNormEnabled)
4109 {
4110 if(!desc.m_CifgEnabled)
4111 {
4112 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
4113 }
4114 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
4115 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
4116 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
4117 }
4118
4119 auto layerName = fmt::format("UnidirectionalSequenceLSTM:{}:{}", subgraphIndex, operatorIndex);
4120 armnn::IConnectableLayer* layer = m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
Ryan OSheac229b3f2023-06-27 22:34:54 +01004121
4122 if (!layer)
4123 {
4124 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4125 operatorIndex, CHECK_LOCATION().AsString()));
4126 }
Mike Kelly5880b912022-01-28 16:18:54 +00004127
4128 // register the input connection slots for the layer, connections are made after all layers have been created
4129 // only the tensors for the inputs are relevant, exclude the const tensors
4130 auto inputTensorIndexes = AsUnsignedVector({operatorPtr->inputs[0],
4131 operatorPtr->inputs[18],
4132 operatorPtr->inputs[19]});
4133 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0],
4134 inputTensorIndexes[1],
4135 inputTensorIndexes[2]});
4136
4137 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4138
4139 layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
4140 layer->GetOutputSlot(1).SetTensorInfo(cellStateOutTensorInfo);
4141 layer->GetOutputSlot(2).SetTensorInfo(outputTensorInfo);
4142
4143 unsigned int tensorIndex = outputTensorIndexes[0];
4144 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(2));
4145 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
4146}
4147
Kevin May7d96b162021-02-03 17:38:41 +00004148void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01004149{
4150 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4151
Mike Kelly0d77ae12022-01-07 17:42:27 +00004152 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4153 const auto* options = operatorPtr->builtin_options.AsUnpackOptions();
Nina Drozd200e3802019-04-15 09:47:39 +01004154
4155 // This unpackAxis indicates the axis to unpack
4156 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
4157
4158 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4159 CHECK_VALID_SIZE(inputs.size(), 1);
4160
Mike Kelly377fb212023-01-10 15:55:28 +00004161 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004162
4163 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
4164 {
4165 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004166 fmt::format("The unpack axis: {} cannot be greater than or equal to "
4167 "the number of input dimension {} {}",
4168 unpackAxis,
4169 inputTensorInfo.GetNumDimensions(),
4170 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004171 }
4172
Nina Drozd200e3802019-04-15 09:47:39 +01004173 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
4174 // If num is not defined, automatically infer from the length of the dimension axis.
4175 if(unpackNum == 0)
4176 {
4177 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
4178 }
4179
4180 // If unpack number cannot be inferred and is still zero, throw ParseException.
4181 if(unpackNum == 0)
4182 {
4183 throw ParseException("Number to unpack must greater than zero.");
4184 }
4185
4186 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4187 CHECK_VALID_SIZE(outputs.size(), unpackNum);
4188
4189 auto inputDimSize = inputTensorInfo.GetNumDimensions();
4190 std::vector<unsigned int> unpackDimSizes(inputDimSize);
4191
4192 // Add current input shape to unpackDimSizes
4193 for (unsigned int i = 0; i < inputDimSize; ++i)
4194 {
4195 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
4196 }
4197
4198 if (unpackDimSizes[unpackAxis] != unpackNum)
4199 {
4200 throw ParseException("Number to unpack must be the same as length of the dimension to "
4201 "unpack along.");
4202 }
4203
4204 unpackDimSizes[unpackAxis] /= unpackNum;
4205
4206 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
4207 for (unsigned int j = 0; j < unpackNum; ++j)
4208 {
4209 // Set the size of the views.
4210 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
4211 {
4212 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
4213 }
4214 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
4215 }
4216
James Ward58dec6b2020-09-11 17:32:44 +01004217 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01004218 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01004219
4220 if (!layer)
4221 {
4222 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4223 operatorIndex, CHECK_LOCATION().AsString()));
4224 }
Nina Drozd200e3802019-04-15 09:47:39 +01004225
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004226 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
4227 unpackDimSizes.data());
4228
Nina Drozd200e3802019-04-15 09:47:39 +01004229 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4230 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4231
Finn Williamsb49ed182021-06-29 15:50:08 +01004232 std::vector<unsigned int> reshapeDims;
4233 for (unsigned int axis = 0; axis < splitOutShape.GetNumDimensions(); ++axis)
4234 {
4235 if (axis != unpackAxis)
4236 {
4237 reshapeDims.push_back(splitOutShape[axis]);
4238 }
4239 }
4240
4241 TensorShape reshapeOutputShape(splitOutShape.GetNumDimensions() -1, reshapeDims.data());
4242
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004243 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
4244 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4245 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01004246 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01004247 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004248 armnn::ReshapeDescriptor desc;
Finn Williamsb49ed182021-06-29 15:50:08 +01004249 desc.m_TargetShape = reshapeOutputShape;
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004250 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
4251
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01004252 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
4253 outputTensorInfo.GetDataType(),
4254 outputTensorInfo.GetQuantizationScale(),
4255 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004256 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
4257
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01004258 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01004259
4260 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
4261 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
4262 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
4263 }
Nina Drozd200e3802019-04-15 09:47:39 +01004264}
4265
/// Parses a TFLite SPLIT operator into an Arm NN Splitter layer with equal-sized views.
/// SPLIT's operator inputs are (axis, value): input 0 is a scalar axis tensor,
/// input 1 is the tensor to split. The split dimension must be evenly divisible
/// by num_splits.
/// @param subgraphIndex  index of the subgraph containing the operator.
/// @param operatorIndex  index of the operator within the subgraph.
/// @throws ParseException on invalid num_splits/axis/shape, NullPointerException on layer failure.
void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsSplitOptions();

    const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);

    // If number of splits cannot be inferred and is zero, throw ParseException.
    if(numSplits == 0)
    {
        throw ParseException("Number to splits must greater than zero.");
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), numSplits);

    // Note operand order: input 0 is the axis tensor, input 1 is the data tensor.
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    if (axisTensorInfo.GetNumElements() != 1)
    {
        throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
                                         CHECK_LOCATION().AsString()));
    }

    // The axis must be a constant: read it from the model's buffer storage.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    // NOTE(review): assumes the axis tensor is int32 — GetNumBytes() would exceed the
    // destination if a model supplied an int64 axis; TODO confirm against the schema.
    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData[0];

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    // Wrap a negative axis to its positive equivalent.
    const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        throw ParseException(
            fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
                        inputTensorInfo.GetNumDimensions(),
                        MaxNumOfTensorDimensions,
                        CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    // Each view covers an equal slice of the split dimension.
    splitterDimSizes[splitDim] /= numSplits;

    SplitterDescriptor splitDesc(numSplits, inputDimSize);
    for (unsigned int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        // Views are laid out back-to-back along the split dimension.
        splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

    auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Only the data tensor (operator input 1) is wired as a layer input;
    // the axis input is a constant consumed above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
        layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
4377
Derek Lambertif0176992020-04-28 13:37:49 +01004378unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
4379{
4380 int numDims = armnn::numeric_cast<int>(numDimsIn);
4381 int v = idx < 0 ? numDims + idx : idx;
Ryan OSheac229b3f2023-06-27 22:34:54 +01004382
4383 if (v < 0 || v > numDims)
4384 {
4385 throw ParseException(fmt::format("Unable to compute index {}", CHECK_LOCATION().AsString()));
4386 }
Derek Lambertif0176992020-04-28 13:37:49 +01004387
4388 return static_cast<unsigned int>(v);
4389}
4390
Kevin May7d96b162021-02-03 17:38:41 +00004391void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01004392{
4393 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4394
Mike Kelly0d77ae12022-01-07 17:42:27 +00004395 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4396 const auto* options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01004397
4398 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4399 CHECK_VALID_SIZE(inputs.size(), 3);
4400
4401 auto& inputTensor = inputs[0];
4402 auto& splitsTensor = inputs[1];
4403 auto& axisTensor = inputs[2];
4404
4405 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
4406 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
4407 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
Ryan OSheac229b3f2023-06-27 22:34:54 +01004408
4409 if (axisTensorInfo.GetNumElements() != 1)
4410 {
4411 throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
4412 CHECK_LOCATION().AsString()));
4413 }
Derek Lambertif0176992020-04-28 13:37:49 +01004414
4415 // Inputs
4416 auto inputDimSize = inputTensorInfo.GetNumDimensions();
4417 if (inputDimSize > MaxNumOfTensorDimensions)
4418 {
4419 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004420 fmt::format("The number of dimensions: {} for input tensors of the "
4421 "SplitV op cannot be greater than {} {}",
4422 inputTensorInfo.GetNumDimensions(),
4423 MaxNumOfTensorDimensions,
4424 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01004425 }
4426
4427 // Get split axis
4428 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004429 if (axisBufferPtr == nullptr)
4430 {
4431 throw ParseException(
4432 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
4433 CHECK_LOCATION().AsString()));
4434 }
4435
Derek Lambertif0176992020-04-28 13:37:49 +01004436 std::vector<int> axisData(axisTensorInfo.GetNumElements());
4437 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004438 int32_t axis = axisData[0];
4439
4440 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
4441 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
4442 {
4443 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
4444 // E.g. Rank 4 tensor can have axis in range [-4, 3)
4445 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
4446 throw ParseException(
4447 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
4448 axis,
4449 CHECK_LOCATION().AsString()));
4450 }
4451 const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
Derek Lambertif0176992020-04-28 13:37:49 +01004452
Derek Lambertif0176992020-04-28 13:37:49 +01004453 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01004454 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01004455 unsigned int numSplits{0};
4456
4457 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01004458 {
4459 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01004460 }
4461 else
4462 {
Ryan OShea86704732020-05-26 11:41:04 +01004463 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01004464 }
4465
4466 if (numSplits <=0)
4467 {
4468 throw ParseException("SplitV has invalid number of splits");
4469 }
4470
Jan Eilersc0761e92020-06-29 16:48:44 +01004471 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01004472 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01004473 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01004474
Jan Eilersc0761e92020-06-29 16:48:44 +01004475 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01004476 int numInferred{0};
4477 unsigned int inferIdx{0};
4478 int splitSum{0};
4479 for (auto split : splitsData)
4480 {
4481 if (split < 0)
4482 {
4483 numInferred++;
4484 inferIdx = idx;
4485 }
4486 else
4487 {
4488 splitSum += split;
4489 }
4490 idx++;
4491 }
4492 // Check for inferred Axis
4493 if (numInferred == 0)
4494 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004495 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01004496 {
4497 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
4498 }
4499 }
4500 else if (numInferred == 1)
4501 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004502 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01004503 }
4504 else
4505 {
4506 throw ParseException("Cannot infer split size for more than one split");
4507 }
4508
Derek Lambertif0176992020-04-28 13:37:49 +01004509 //Ouput size validation
4510 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4511 CHECK_VALID_SIZE(outputs.size(), numSplits);
4512
4513 // Setup Armnn descriptor
4514 SplitterDescriptor splitDesc(numSplits, inputDimSize);
4515 unsigned int accumSplit = 0;
4516 for (unsigned int j = 0; j < numSplits; ++j)
4517 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004518 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01004519
4520 // Set the size of the views.
4521 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
4522 {
4523 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
4524 if (dimIdx == splitDim)
4525 {
4526 dimSize = splitSize;
4527 }
4528 splitDesc.SetViewSize(j, dimIdx, dimSize);
4529 }
4530
4531 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
4532 accumSplit += splitSize;
4533 }
4534
James Ward58dec6b2020-09-11 17:32:44 +01004535 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01004536 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01004537
4538 if (!layer)
4539 {
4540 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4541 operatorIndex, CHECK_LOCATION().AsString()));
4542 }
Derek Lambertif0176992020-04-28 13:37:49 +01004543
4544 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4545 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4546
4547 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4548 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01004549 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01004550 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
4551 }
4552
4553 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4554 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4555}
4556
Matthew Sloyan28f177c2021-04-09 14:38:52 +01004557void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
4558{
4559 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
4560}
4561
Kevin May7d96b162021-02-03 17:38:41 +00004562void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
Inki Daed4619e22020-09-10 15:33:54 +09004563{
Matthew Sloyan28f177c2021-04-09 14:38:52 +01004564 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
4565}
4566
/// Shared implementation for TFLite ARG_MIN / ARG_MAX.
/// Reads the constant axis tensor (operator input 1), validates it, and adds an
/// Arm NN ArgMinMax layer whose function (Min or Max) is chosen by the caller.
/// @param subgraphIndex     index of the subgraph containing the operator.
/// @param operatorIndex     index of the operator within the subgraph.
/// @param argMinMaxFunction selects Min or Max reduction.
/// @throws ParseException on bad axis/output type, NullPointerException on layer failure.
void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    if (axisTensorInfo.GetNumElements() != 1)
    {
        throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
                                         CHECK_LOCATION().AsString()));
    }

    // Check if output tensor type is Signed32 or Signed64
    if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
        outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
    {
        throw ParseException(
            fmt::format(
                "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
                CHECK_LOCATION().AsString()));
    }

    // Get const axis value from model and set it to descriptor.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    // NOTE(review): assumes the axis tensor is int32 — the copy below would overrun
    // axisData if a model supplied an int64 axis (8 bytes); TODO confirm upstream.
    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData.front();

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    ArgMinMaxDescriptor desc;
    desc.m_Axis = axis;
    desc.m_Function = argMinMaxFunction;

    // Register a ArgMin/ArgMax layer.
    auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
    auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
    IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output shape is re-derived from the (possibly dynamic) input shape.
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
4647
/// Parses a TFLite GATHER operator into an Arm NN Gather layer.
/// Inputs are (params, indices); the gather axis comes from the operator's
/// builtin options. Validates the axis range and the expected output rank
/// (inputRank + indicesRank - 1).
/// @param subgraphIndex  index of the subgraph containing the operator.
/// @param operatorIndex  index of the operator within the subgraph.
/// @throws ParseException on invalid axis/output rank, NullPointerException on layer failure.
void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    armnn::GatherDescriptor gatherDescriptor;

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsGatherOptions();
    auto axis = options->axis;

    auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
    auto outputDimensions = outputTensorInfo.GetNumDimensions();
    // Negative axes count from the back; valid range is [-rank, rank).
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        throw ParseException(
            fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
                        axis,
                        inputDimensions, inputDimensions,
                        CHECK_LOCATION().AsString()));
    }
    // GATHER's output rank must equal inputRank + indicesRank - 1.
    if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
    {
        throw ParseException(
            fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
                        outputDimensions,
                        inputDimensions, indicesDimensions,
                        CHECK_LOCATION().AsString()));
    }

    gatherDescriptor.m_Axis = axis;

    IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                               operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output shape is re-derived from both inputs (handles dynamic shapes).
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
4708
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004709void TfLiteParserImpl::ParseGatherNd(size_t subgraphIndex, size_t operatorIndex)
4710{
4711 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4712
4713 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4714 CHECK_VALID_SIZE(inputs.size(), 2);
4715 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4716 CHECK_VALID_SIZE(outputs.size(), 1);
4717
Mike Kelly377fb212023-01-10 15:55:28 +00004718 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4719 armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004720
4721 auto layerName = fmt::format("GatherNd:{}:{}", subgraphIndex, operatorIndex);
4722 IConnectableLayer* layer = m_Network->AddGatherNdLayer(layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01004723
4724 if (!layer)
4725 {
4726 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4727 operatorIndex, CHECK_LOCATION().AsString()));
4728 }
4729
Mike Kelly377fb212023-01-10 15:55:28 +00004730 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004731 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4732
4733 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4734 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
4735
4736 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4737 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4738}
4739
Kevin May7d96b162021-02-03 17:38:41 +00004740void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00004741{
4742 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4743
Kevin May7d96b162021-02-03 17:38:41 +00004744 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004745 CHECK_VALID_SIZE(inputs.size(), 1);
Kevin May7d96b162021-02-03 17:38:41 +00004746 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004747 CHECK_VALID_SIZE(outputs.size(), 1);
4748
4749 armnn::DepthToSpaceDescriptor descriptor;
4750
Mike Kelly0d77ae12022-01-07 17:42:27 +00004751 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4752 const auto* options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
Sadik Armagan26868492021-01-22 14:25:31 +00004753 auto blockSize = options->block_size;
4754 if (blockSize < 2)
4755 {
4756 throw ParseException(
4757 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
4758 blockSize,
4759 CHECK_LOCATION().AsString()));
4760 }
4761 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
4762
4763 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
4764 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01004765
4766 if (!layer)
4767 {
4768 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4769 operatorIndex, CHECK_LOCATION().AsString()));
4770 }
4771
Mike Kelly377fb212023-01-10 15:55:28 +00004772 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan26868492021-01-22 14:25:31 +00004773 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4774
4775 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4776 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4777
4778 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4779 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4780}
4781
Kevin May7d96b162021-02-03 17:38:41 +00004782void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004783{
Sadik Armagana2747482021-02-09 10:28:54 +00004784 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
4785}
4786
Teresa Charlin4e3e8312021-08-05 12:34:37 +01004787void TfLiteParserImpl::ParseReduceProd(size_t subgraphIndex, size_t operatorIndex)
4788{
4789 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Prod);
4790}
4791
Sadik Armagana2747482021-02-09 10:28:54 +00004792void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
4793{
4794 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
4795}
4796
// Maps the TfLite REDUCE_MIN operator onto the shared reduce handler.
void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
}
4801
4802void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
4803{
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004804 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4805
Mike Kelly0d77ae12022-01-07 17:42:27 +00004806 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4807 const auto* options = operatorPtr->builtin_options.AsReducerOptions();
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004808
4809 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4810 CHECK_VALID_SIZE(inputs.size(), 2);
4811
4812 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4813 CHECK_VALID_SIZE(outputs.size(), 1);
4814
Sadik Armagana2747482021-02-09 10:28:54 +00004815 auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004816
Mike Kelly377fb212023-01-10 15:55:28 +00004817 armnn::TensorInfo inputTensorInfo0 = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4818 armnn::TensorInfo inputTensorInfo1 = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004819
4820 ReduceDescriptor desc;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004821 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
4822 // Get const axis value from model and set it to descriptor.
4823 if (axisBufferPtr != nullptr)
4824 {
Sadik Armagan49bdb792021-02-11 13:57:07 +00004825 std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
4826 ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());
4827
4828 // Convert the axis to unsigned int and remove duplicates.
4829 auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
4830 std::set<unsigned int> uniqueAxis;
4831 std::transform(axisData.begin(),
4832 axisData.end(),
4833 std::inserter(uniqueAxis, uniqueAxis.begin()),
4834 [rank](int i)->unsigned int{
4835 return static_cast<uint32_t>(((i + rank) % rank)); });
4836 desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004837 }
Sadik Armagana2747482021-02-09 10:28:54 +00004838 else
4839 {
4840 for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
4841 {
4842 desc.m_vAxis.push_back(i);
4843 }
4844 }
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004845
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004846 desc.m_KeepDims = options->keep_dims;
Sadik Armagana2747482021-02-09 10:28:54 +00004847 desc.m_ReduceOperation = reduceOperation;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004848
4849 // Register a new layer object, Sum.
Mike Kelly0d77ae12022-01-07 17:42:27 +00004850 IConnectableLayer* layer = m_Network->AddReduceLayer(desc, layerName.c_str());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004851
Mike Kelly377fb212023-01-10 15:55:28 +00004852 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004853 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4854
4855 // Register input tensor to the layer.
4856 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4857 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4858
4859 // Register output tensor to the layer.
4860 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4861 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4862}
4863
Mike Kelly31dce2b2021-09-01 21:22:37 +01004864void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex)
4865{
4866 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4867
4868 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4869 CHECK_VALID_SIZE(inputs.size(), 1);
4870
4871 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4872 CHECK_VALID_SIZE(outputs.size(), 1);
4873
4874 auto layerName = fmt::format("LRN:{}:{}", subgraphIndex, operatorIndex);
4875 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4876
Mike Kelly377fb212023-01-10 15:55:28 +00004877 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly31dce2b2021-09-01 21:22:37 +01004878
4879 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4880 const auto* options = operatorPtr->builtin_options.AsLocalResponseNormalizationOptions();
4881
4882 armnn::NormalizationDescriptor descriptor;
4883 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4884 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
4885 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
4886 descriptor.m_NormSize = static_cast<uint32_t>(options->radius);
4887 descriptor.m_K = options->bias;
4888 descriptor.m_Alpha = options->alpha;
4889 descriptor.m_Beta = options->beta;
4890
4891 // ArmNN expects normSize to be the full size of the normalization
4892 // window rather than the radius as in TfLite.
4893 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
4894
4895 IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01004896
4897 if (!layer)
4898 {
4899 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4900 operatorIndex, CHECK_LOCATION().AsString()));
4901 }
Mike Kelly31dce2b2021-09-01 21:22:37 +01004902
Mike Kelly377fb212023-01-10 15:55:28 +00004903 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Mike Kelly31dce2b2021-09-01 21:22:37 +01004904 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4905
4906 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4907 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4908
4909 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4910 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4911}
4912
Teresa Charlin28aa6692022-07-12 11:18:44 +01004913void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
4914{
4915 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
4916}
4917
Teresa Charlin93f0ad02023-03-23 15:28:02 +00004918void TfLiteParserImpl::ParseCeil(size_t subgraphIndex, size_t operatorIndex)
4919{
4920 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Ceil);
4921}
4922
Teresa Charlin28aa6692022-07-12 11:18:44 +01004923void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
4924{
4925 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
4926}
4927
// Maps the TfLite LOG operator onto the shared elementwise-unary handler.
void TfLiteParserImpl::ParseLog(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Log);
}
4932
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004933void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
4934{
4935 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
4936}
4937
// Maps the TfLite NEG operator onto the shared elementwise-unary handler.
void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
}
4942
John Mcloughlin0ec00872023-05-15 17:03:49 +01004943void TfLiteParserImpl::ParsePower(size_t subgraphIndex, size_t operatorIndex)
4944{
4945 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4946
4947 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4948 CHECK_VALID_SIZE(inputs.size(), 2);
4949
4950 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4951 CHECK_VALID_SIZE(outputs.size(), 1);
4952
4953 auto layerName = fmt::format("Power:{}:{}", subgraphIndex, operatorIndex);
4954
4955 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4956 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
4957 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
4958
4959 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Power, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01004960
4961 if (!layer)
4962 {
4963 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
4964 operatorIndex, CHECK_LOCATION().AsString()));
4965 }
John Mcloughlin0ec00872023-05-15 17:03:49 +01004966
4967 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
4968 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
4969 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4970
4971 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4972 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
4973
4974 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4975 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4976}
4977
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004978void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
4979{
4980 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
4981}
4982
Teresa Charlin28aa6692022-07-12 11:18:44 +01004983void TfLiteParserImpl::ParseSin(size_t subgraphIndex, size_t operatorIndex)
4984{
4985 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sin);
4986}
4987
Teresa Charlinf0fce5b2022-05-04 17:24:43 +01004988void TfLiteParserImpl::ParseSqrt(size_t subgraphIndex, size_t operatorIndex)
4989{
4990 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sqrt);
4991}
4992
Teresa Charlin6963b332023-07-11 11:35:41 +01004993void TfLiteParserImpl::ParseSquare(size_t subgraphIndex, size_t operatorIndex)
4994{
4995 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4996
4997 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4998 CHECK_VALID_SIZE(inputs.size(), 1);
4999
5000 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5001 CHECK_VALID_SIZE(outputs.size(), 1);
5002
5003 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
5004
5005 auto layerName = fmt::format("Square:{}:{}", subgraphIndex, operatorIndex);
5006 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Mul, layerName.c_str());
5007 ARMNN_ASSERT(layer != nullptr);
5008
5009 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 0});
5010 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
5011 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5012
5013 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5014 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[0]});
5015
5016 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5017 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5018}
5019
John Mcloughlin0ec00872023-05-15 17:03:49 +01005020void TfLiteParserImpl::ParseSquaredDifference(size_t subgraphIndex, size_t operatorIndex)
5021{
5022 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5023
5024 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5025 CHECK_VALID_SIZE(inputs.size(), 2);
5026
5027 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5028 CHECK_VALID_SIZE(outputs.size(), 1);
5029
5030 auto layerName = fmt::format("SquaredDifference:{}:{}", subgraphIndex, operatorIndex);
5031
5032 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
5033 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
5034 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
5035
5036 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::SqDiff, layerName.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01005037
5038 if (!layer)
5039 {
5040 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5041 operatorIndex, CHECK_LOCATION().AsString()));
5042 }
John Mcloughlin0ec00872023-05-15 17:03:49 +01005043
5044 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
5045 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
5046 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5047
5048 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5049 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
5050
5051 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5052 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
5053}
5054
Matthew Sloyaned7fce42021-04-15 20:46:24 +01005055void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
5056{
5057 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
5058
5059 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
5060 CHECK_VALID_SIZE(inputs.size(), 1);
5061
5062 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
5063 CHECK_VALID_SIZE(outputs.size(), 1);
5064
5065 std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
5066 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
5067
5068 ElementwiseUnaryDescriptor desc;
5069 desc.m_Operation = unaryOperation;
5070 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
Ryan OSheac229b3f2023-06-27 22:34:54 +01005071
5072 if (!layer)
5073 {
5074 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5075 operatorIndex, CHECK_LOCATION().AsString()));
5076 }
Matthew Sloyaned7fce42021-04-15 20:46:24 +01005077
Mike Kelly377fb212023-01-10 15:55:28 +00005078 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Sloyaned7fce42021-04-15 20:46:24 +01005079 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
5080
5081 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
5082 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
5083
5084 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
5085 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
5086}
5087
Bruno Goncalves2d0eb862021-07-11 14:10:15 -03005088void TfLiteParserImpl::ParseEqual(size_t subgraphIndex, size_t operatorIndex)
5089{
5090 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Equal);
5091}
5092
// Maps the TfLite NOT_EQUAL operator onto the shared comparison handler.
void TfLiteParserImpl::ParseNotEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::NotEqual);
}
5097
// Maps the TfLite GREATER operator onto the shared comparison handler.
void TfLiteParserImpl::ParseGreater(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Greater);
}
5102
// Maps the TfLite GREATER_EQUAL operator onto the shared comparison handler.
void TfLiteParserImpl::ParseGreaterOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::GreaterOrEqual);
}
5107
// Maps the TfLite LESS operator onto the shared comparison handler.
void TfLiteParserImpl::ParseLess(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Less);
}
5112
// Maps the TfLite LESS_EQUAL operator onto the shared comparison handler.
void TfLiteParserImpl::ParseLessOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::LessOrEqual);
}
5117
// Shared handler for all two-input comparison operators (Equal, NotEqual, Greater,
// GreaterOrEqual, Less, LessOrEqual). Builds an ArmNN Comparison layer whose name is
// "<Op>:<subgraph>:<operator>".
void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorIndex,
                                       ComparisonOperation comparisonOperation)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    // Comparisons take exactly two inputs and produce a single (boolean) output tensor.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // The raw name still contains the {}:{} placeholders; they are filled in below.
    auto layerName = std::string(GetComparisonOperationAsCString(comparisonOperation)) + ":{}:{}";
    std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    // Both inputs must share quantization parameters.
    CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerNameFormatted, "Input 0", "Input 1");

    ComparisonDescriptor desc;
    desc.m_Operation = comparisonOperation;
    IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());

    if (!layer)
    {
        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
                                   operatorIndex, CHECK_LOCATION().AsString()));
    }

    // Output shape is inferred from both inputs (broadcasting handled downstream).
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
5155
Mike Kelly04d82292023-01-19 18:29:40 +00005156armnn::IConnectableLayer* TfLiteParserImpl::AddReshapeLayer(armnn::IConnectableLayer* layer,
5157 unsigned int outputSlot,
5158 std::string reshapeLayerName,
5159 armnn::TensorInfo outputShape)
5160{
5161 ReshapeDescriptor desc;
5162 desc.m_TargetShape = outputShape.GetShape();
5163
5164 IConnectableLayer* reshapeLayer =
5165 m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
5166
5167 auto & prevOutputSlot = layer->GetOutputSlot(outputSlot);
5168 prevOutputSlot.Connect(reshapeLayer->GetInputSlot(0));
5169 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputShape);
5170 return reshapeLayer;
5171}
5172
Kevin May7d96b162021-02-03 17:38:41 +00005173armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
5174 unsigned int outputSlot,
5175 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01005176{
5177 ActivationDescriptor activationDesc;
5178 std::string layerName = prevLayer->GetName();
5179
5180 switch(activationType)
5181 {
5182 case tflite::ActivationFunctionType_NONE:
5183 {
5184 // this is a no-op: return previous layer
5185 return prevLayer;
5186 }
5187 case tflite::ActivationFunctionType_RELU:
5188 {
5189 activationDesc.m_Function = ActivationFunction::ReLu;
5190 layerName += ":RELU";
5191 break;
5192 }
5193 case tflite::ActivationFunctionType_RELU6:
5194 {
5195 activationDesc.m_Function = ActivationFunction::BoundedReLu;
5196 activationDesc.m_A = 6.0f;
5197 activationDesc.m_B = 0.0f;
5198 layerName += ":RELU6";
5199 break;
5200 }
5201 case tflite::ActivationFunctionType_TANH:
5202 {
5203 activationDesc.m_Function = ActivationFunction::TanH;
5204 activationDesc.m_A = 1.0f;
5205 activationDesc.m_B = 1.0f;
5206 layerName += ":TANH";
5207 break;
5208 }
5209
5210 // I only put these here as a reminder what others we could support
5211 case tflite::ActivationFunctionType_RELU_N1_TO_1:
5212 case tflite::ActivationFunctionType_SIGN_BIT:
5213 default:
5214 {
5215 throw ParseException(
Mike Kelly377fb212023-01-10 15:55:28 +00005216 fmt::format("TfLite parser doesn't support fused activation: "
James Ward58dec6b2020-09-11 17:32:44 +01005217 "{}/{} {} ",
5218 activationType,
5219 tflite::EnumNameActivationFunctionType(activationType),
5220 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005221
5222 }
5223 }
5224
5225 IConnectableLayer* activationLayer =
5226 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
5227
5228 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
5229 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
5230 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
5231 return activationLayer;
5232}
5233
Teresa Charlincdbd40b2022-02-25 13:21:55 +00005234armnn::IConnectableLayer* TfLiteParserImpl::AddFusedFloorLayer(armnn::IConnectableLayer* prevLayer,
5235 unsigned int outputSlot)
5236{
Teresa Charlin725728e2022-05-05 13:33:33 +01005237
5238 auto& prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
5239 DataType dataType = prevOutputSlot.GetTensorInfo().GetDataType();
5240
5241 if (dataType == DataType::Signed32)
5242 {
5243 return prevLayer;
5244 }
5245
Teresa Charlincdbd40b2022-02-25 13:21:55 +00005246 std::string layerName = prevLayer->GetName();
5247 IConnectableLayer* floorLayer = m_Network->AddFloorLayer(layerName.c_str());
5248
Teresa Charlincdbd40b2022-02-25 13:21:55 +00005249 prevOutputSlot.Connect(floorLayer->GetInputSlot(0));
5250 floorLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
Teresa Charlin725728e2022-05-05 13:33:33 +01005251
Teresa Charlincdbd40b2022-02-25 13:21:55 +00005252 return floorLayer;
5253}
5254
Mike Kelly0d77ae12022-01-07 17:42:27 +00005255TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char* fileName)
telsoa01c577f2c2018-08-31 09:22:23 +01005256{
5257 if (fileName == nullptr)
5258 {
James Ward58dec6b2020-09-11 17:32:44 +01005259 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01005260 CHECK_LOCATION().AsString()));
5261 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01005262 std::error_code errorCode;
5263 fs::path pathToFile(fileName);
5264 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01005265 {
James Ward58dec6b2020-09-11 17:32:44 +01005266 //fmt::format() could not be used here (format error)
5267 std::stringstream msg;
5268 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
5269 << " " << CHECK_LOCATION().AsString();
James Ward58dec6b2020-09-11 17:32:44 +01005270 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01005271 }
Colm Donelan0dfb2652023-06-22 10:19:17 +01005272 if (!fs::is_regular_file(pathToFile))
5273 {
5274 // Exclude non regular files.
5275 throw InvalidArgumentException(fmt::format("File \"{}\" is not a regular file and cannot be loaded.",
5276 pathToFile.c_str()));
5277 }
5278
telsoa01c577f2c2018-08-31 09:22:23 +01005279 std::ifstream file(fileName, std::ios::binary);
5280 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
5281 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
5282 fileContent.size());
5283}
5284
Mike Kelly0d77ae12022-01-07 17:42:27 +00005285TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t* binaryContent, size_t len)
telsoa01c577f2c2018-08-31 09:22:23 +01005286{
5287 if (binaryContent == nullptr)
5288 {
James Ward58dec6b2020-09-11 17:32:44 +01005289 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01005290 CHECK_LOCATION().AsString()));
5291 }
5292 flatbuffers::Verifier verifier(binaryContent, len);
5293 if (verifier.VerifyBuffer<tflite::Model>() == false)
5294 {
5295 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005296 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
5297 "flatbuffers format. size:{} {}",
5298 len,
5299 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005300 }
5301 return tflite::UnPackModel(binaryContent);
5302}
5303
Mike Kelly0d77ae12022-01-07 17:42:27 +00005304TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00005305 size_t subgraphIndex,
5306 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005307{
5308 CHECK_MODEL(model, subgraphIndex, operatorIndex);
5309
Mike Kelly0d77ae12022-01-07 17:42:27 +00005310 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5311 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005312
5313 size_t inputCount = operatorPtr->inputs.size();
mathad01c21025d2021-04-26 10:09:37 +01005314 TensorRawPtrVector result;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005315 for (size_t i = 0; i < inputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005316 {
mathad01c21025d2021-04-26 10:09:37 +01005317 // If the input location is -1 then assume input is turned off.
5318 if (operatorPtr->inputs[i] == -1)
5319 {
5320 continue;
5321 }
5322 else
5323 {
5324 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
5325 result.push_back(subgraphPtr->tensors[inputId].get());
5326 }
telsoa01c577f2c2018-08-31 09:22:23 +01005327 }
5328 return result;
5329}
5330
Mike Kelly0d77ae12022-01-07 17:42:27 +00005331TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00005332 size_t subgraphIndex,
5333 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005334{
5335 CHECK_MODEL(model, subgraphIndex, operatorIndex);
5336
Mike Kelly0d77ae12022-01-07 17:42:27 +00005337 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5338 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005339
5340 size_t outputCount = operatorPtr->outputs.size();
5341 TensorRawPtrVector result(outputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005342 for (size_t i = 0; i < outputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005343 {
5344 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
5345 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01005346 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01005347 }
5348 return result;
5349}
5350
Mike Kelly0d77ae12022-01-07 17:42:27 +00005351TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00005352 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005353{
5354 CHECK_SUBGRAPH(model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005355 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005356
Derek Lambertiff05cc52019-04-26 13:05:17 +01005357 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01005358 TensorIdRawPtrVector result(inputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005359 for (size_t i = 0; i < inputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005360 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01005361 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01005362 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01005363 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01005364 }
5365 return result;
5366}
5367
Mike Kelly0d77ae12022-01-07 17:42:27 +00005368TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00005369 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005370{
5371 CHECK_SUBGRAPH(model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005372 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005373
Derek Lambertiff05cc52019-04-26 13:05:17 +01005374 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01005375 TensorIdRawPtrVector result(outputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005376 for (size_t i = 0; i < outputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005377 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01005378 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
5379 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01005380 }
5381 return result;
5382}
5383
Kevin May7d96b162021-02-03 17:38:41 +00005384std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
5385 size_t subgraphIndex,
5386 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005387{
5388 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005389 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5390 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005391 return operatorPtr->inputs;
5392}
5393
Kevin May7d96b162021-02-03 17:38:41 +00005394std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
5395 size_t subgraphIndex,
5396 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005397{
5398 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005399 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5400 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01005401 return operatorPtr->outputs;
5402}
5403
Kevin May7d96b162021-02-03 17:38:41 +00005404void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
5405 size_t operatorIndex,
5406 IConnectableLayer* layer,
Finn Williamsd4fa5452021-03-01 12:31:41 +00005407 const std::vector<unsigned int>& tensorIndexes,
5408 unsigned int startingSlotIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005409{
5410 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Ryan OSheac229b3f2023-06-27 22:34:54 +01005411
5412 if (!layer)
5413 {
5414 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5415 operatorIndex, CHECK_LOCATION().AsString()));
5416 }
Matthew Sloyan81beae32021-07-13 19:46:11 +01005417
Finn Williamsd4fa5452021-03-01 12:31:41 +00005418 if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
telsoa01c577f2c2018-08-31 09:22:23 +01005419 {
5420 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005421 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
5422 " for subgraph:{} operator index:{} {}",
5423 tensorIndexes.size(),
5424 layer->GetNumInputSlots(),
5425 subgraphIndex,
5426 operatorIndex,
5427 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005428 }
5429
Finn Williamsd4fa5452021-03-01 12:31:41 +00005430 for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
telsoa01c577f2c2018-08-31 09:22:23 +01005431 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00005432 unsigned int tensorIndex = tensorIndexes[index];
5433 armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
telsoa01c577f2c2018-08-31 09:22:23 +01005434 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
5435 }
5436}
5437
Kevin May7d96b162021-02-03 17:38:41 +00005438void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
5439 size_t operatorIndex,
5440 IConnectableLayer* layer,
5441 const std::vector<unsigned int>& tensorIndexes)
telsoa01c577f2c2018-08-31 09:22:23 +01005442{
5443 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Ryan OSheac229b3f2023-06-27 22:34:54 +01005444
5445 if (!layer)
5446 {
5447 throw NullPointerException(fmt::format("Layer {} pointer is null {}",
5448 operatorIndex, CHECK_LOCATION().AsString()));
5449 }
5450
telsoa01c577f2c2018-08-31 09:22:23 +01005451 if (tensorIndexes.size() != layer->GetNumOutputSlots())
5452 {
5453 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005454 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
5455 " for subgraph:{} operator index:{} {}",
5456 tensorIndexes.size(),
5457 layer->GetNumOutputSlots(),
5458 subgraphIndex,
5459 operatorIndex,
5460 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005461 }
5462
5463 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
5464 {
5465 unsigned int tensorIndex = tensorIndexes[slotIndex];
5466 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
5467 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
5468 }
5469}
5470
Mike Kelly377fb212023-01-10 15:55:28 +00005471void TfLiteParserImpl::SetupInputLayerTensorInfos(size_t subgraphIndex)
5472{
5473 CHECK_SUBGRAPH(m_Model, subgraphIndex);
5474
5475 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
5476 for (auto const& tensorIdAndPtr : inputs)
5477 {
5478 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
5479 m_TensorInfos.insert({tensorIdAndPtr.first, tensorInfo});
5480 }
5481}
5482
Kevin May7d96b162021-02-03 17:38:41 +00005483void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005484{
5485 CHECK_SUBGRAPH(m_Model, subgraphIndex);
5486
5487 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005488 for (auto const& tensorIdAndPtr : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005489 {
5490 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
5491 IConnectableLayer* layer =
5492 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
5493
5494 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
5495 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
5496
5497 RegisterOutputSlots(subgraphIndex,
5498 VIRTUAL_OPERATOR_ID,
5499 layer,
5500 { static_cast<uint32_t>(tensorIdAndPtr.first) });
5501 }
5502}
5503
Kevin May7d96b162021-02-03 17:38:41 +00005504void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005505{
5506 CHECK_SUBGRAPH(m_Model, subgraphIndex);
5507
5508 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005509 for (auto const& tensorIdAndPtr : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005510 {
5511 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
5512 IConnectableLayer* layer =
5513 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
5514
5515 RegisterInputSlots(subgraphIndex,
5516 VIRTUAL_OPERATOR_ID,
5517 layer,
5518 { static_cast<uint32_t>(tensorIdAndPtr.first) });
5519 }
5520}
5521
// Caches a TensorInfo in m_TensorInfos for every tensor that has consumers
// registered but no producing output slot — i.e. the tensors that
// SetupConstantLayers will later turn into constant layers.
void TfLiteParserImpl::SetupConstantLayerTensorInfos(size_t subgraph)
{
    CHECK_SUBGRAPH(m_Model, subgraph);

    const auto & subgraphPtr = m_Model->subgraphs[subgraph];
    for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // Consumed but never produced: must be backed by a constant buffer.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
                m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
            {
                // NOTE(review): tensors are read from subgraphPtr (the 'subgraph'
                // argument) while the outer loop walks every entry of
                // m_SubgraphConnections — presumably only one subgraph's
                // connections exist at this point; confirm for multi-subgraph models.
                TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();

                armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);

                m_TensorInfos.insert({tensorIndex, tensorInfo});
            }
        }
    }
}
5543
Mike Kelly5880b912022-01-28 16:18:54 +00005544void TfLiteParserImpl::SetupConstantLayers(size_t subgraph)
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02005545{
Mike Kelly5880b912022-01-28 16:18:54 +00005546 CHECK_SUBGRAPH(m_Model, subgraph);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02005547
Mike Kelly5880b912022-01-28 16:18:54 +00005548 const auto & subgraphPtr = m_Model->subgraphs[subgraph];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02005549 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
5550 {
5551 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
5552 {
5553 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
5554 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
5555 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01005556 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02005557
Mike Kelly5880b912022-01-28 16:18:54 +00005558 if (IsConstTensor(tensorPtr))
Matthew Sloyan81beae32021-07-13 19:46:11 +01005559 {
5560 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
Mike Kelly5880b912022-01-28 16:18:54 +00005561 armnn::DataType dataType = tensorInfo.GetDataType();
5562
5563 if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
5564 != m_ConstantsToDequantize.end())
5565 {
5566 dataType = DataType::Float32;
5567 }
5568 auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo, dataType);
5569
5570 std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
5571 IConnectableLayer *layer = m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
5572
5573 layer->GetOutputSlot(0).SetTensorInfo(tensorAndData.first.GetInfo());
5574 RegisterOutputSlots(subgraphIndex,
5575 VIRTUAL_OPERATOR_ID,
5576 layer,
5577 { tensorIndex });
5578 }
5579 else if (ShouldConstantTensorBeCreated(tensorIndex))
5580 {
5581 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5582 armnn::DataType dataType = tensorInfo.GetDataType();
5583
5584 if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
5585 != m_ConstantsToDequantize.end())
5586 {
5587 dataType = DataType::Float32;
5588 }
5589 // Make sure isConstant flag is set.
5590 tensorInfo.SetConstant();
5591 tensorInfo.SetDataType(dataType);
5592
5593 auto tensorAndData = ConstTensor(tensorInfo, std::vector<uint8_t>(tensorInfo.GetNumBytes()));
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02005594
Matthew Sloyan81beae32021-07-13 19:46:11 +01005595 std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005596 IConnectableLayer* layer = m_Network->AddConstantLayer(tensorAndData, layerName.c_str());
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02005597
Matthew Sloyan81beae32021-07-13 19:46:11 +01005598 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
5599 RegisterOutputSlots(subgraphIndex,
5600 VIRTUAL_OPERATOR_ID,
5601 layer,
Mike Kelly5880b912022-01-28 16:18:54 +00005602 {tensorIndex});
Matthew Sloyan81beae32021-07-13 19:46:11 +01005603 }
5604 else
5605 {
5606 throw ParseException(
5607 fmt::format("Invalid Tensor: Tensor should be constant. {}",
5608 CHECK_LOCATION().AsString()));
5609 }
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02005610 }
5611 }
5612 }
5613}
5614
telsoa01c577f2c2018-08-31 09:22:23 +01005615// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Kevin May7d96b162021-02-03 17:38:41 +00005616TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005617{
5618 CHECK_BUFFER(model, bufferIndex);
5619 return model->buffers[bufferIndex].get();
5620}
5621
Matteo Martincigh747ef822018-12-18 09:26:39 +00005622template<typename T>
Kevin May7d96b162021-02-03 17:38:41 +00005623std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
5624TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
5625 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00005626 armnn::TensorInfo& tensorInfo,
5627 armnn::Optional<armnn::PermutationVector&> permutationVector)
5628{
Matthew Sloyan81beae32021-07-13 19:46:11 +01005629 // Make sure isConstant flag is set.
5630 tensorInfo.SetConstant();
5631
Matteo Martincigh747ef822018-12-18 09:26:39 +00005632 auto constData = CreateConstTensorImpl<T>(bufferPtr,
5633 tensorPtr,
5634 tensorInfo,
5635 permutationVector);
Kevin May7d96b162021-02-03 17:38:41 +00005636 TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
Matteo Martincigh747ef822018-12-18 09:26:39 +00005637 return std::make_pair(constData.first, std::move(storage));
5638}
5639
Mike Kelly5880b912022-01-28 16:18:54 +00005640bool TfLiteParserImpl::ShouldConstantTensorBeCreated(unsigned int tensorIndex)
5641{
5642 // If the TensorIndex appears in the list of ConstantsToBeCreated then return true
5643 return (std::find(m_ConstantsToBeCreated.begin(), m_ConstantsToBeCreated.end(), tensorIndex)
5644 != m_ConstantsToBeCreated.end());
5645}
5646
Finn Williamsd4fa5452021-03-01 12:31:41 +00005647bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
5648{
5649 CHECK_TENSOR_PTR(tensorPtr);
mathad01bf7edb62021-04-20 16:12:45 +01005650 bool isConst = true;
5651
5652 auto buffer = GetBuffer(m_Model, tensorPtr->buffer);
5653 if (buffer->data.size() == 0)
5654 {
5655 isConst = false;
5656 }
5657
5658 return isConst;
Finn Williamsd4fa5452021-03-01 12:31:41 +00005659}
5660
Kevin May7d96b162021-02-03 17:38:41 +00005661std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
Finn Williamsd4fa5452021-03-01 12:31:41 +00005662TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
5663 armnn::TensorInfo& tensorInfo,
5664 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01005665{
5666 CHECK_TENSOR_PTR(tensorPtr);
5667 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5668 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5669
Matthew Sloyan81beae32021-07-13 19:46:11 +01005670 // Make sure isConstant flag is set.
5671 tensorInfo.SetConstant();
5672
telsoa01c577f2c2018-08-31 09:22:23 +01005673 switch (tensorInfo.GetDataType())
5674 {
5675 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00005676 return CreateConstTensorAndStoreData<float>(bufferPtr,
5677 tensorPtr,
5678 tensorInfo,
5679 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00005680 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00005681 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
5682 tensorPtr,
5683 tensorInfo,
5684 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00005685 case armnn::DataType::QSymmS8:
5686 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
5687 tensorPtr,
5688 tensorInfo,
5689 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00005690 case armnn::DataType::QAsymmS8:
5691 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
5692 tensorPtr,
5693 tensorInfo,
5694 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01005695 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00005696 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
5697 tensorPtr,
5698 tensorInfo,
5699 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01005700 default:
5701 {
5702 std::stringstream errString;
5703 errString << "Unexpected datatype when creating const tensor: "
5704 << armnn::GetDataTypeName(tensorInfo.GetDataType())
5705 << " shape:" << tensorInfo.GetShape()
5706 << CHECK_LOCATION().AsString();
5707 throw ParseException(errString.str());
5708 }
5709 }
5710}
5711
Finn Williamsd4fa5452021-03-01 12:31:41 +00005712armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5713 armnn::TensorInfo& tensorInfo)
5714{
5715 CHECK_TENSOR_PTR(tensorPtr);
5716 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5717 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5718
Matthew Sloyan81beae32021-07-13 19:46:11 +01005719 // Make sure isConstant flag is set.
5720 tensorInfo.SetConstant();
5721
Finn Williamsd4fa5452021-03-01 12:31:41 +00005722 return ConstTensor(tensorInfo, bufferPtr->data.data());
5723}
5724
Mike Kelly5880b912022-01-28 16:18:54 +00005725std::pair<armnn::ConstTensor, std::unique_ptr<float[]>>
5726TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5727 armnn::TensorInfo& tensorInfo,
5728 armnn::DataType inputDataType)
5729{
5730 CHECK_TENSOR_PTR(tensorPtr);
5731 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5732 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5733
5734 // Make sure isConstant flag is set.
5735 tensorInfo.SetConstant();
5736
Mike Kelly0506ef02023-01-03 16:29:44 +00005737 if (inputDataType == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
Mike Kelly5880b912022-01-28 16:18:54 +00005738 {
Mike Kelly0506ef02023-01-03 16:29:44 +00005739 try
5740 {
5741 TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5742 std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5743 return std::make_pair(ConstTensor(constTensorInfo, data.get()), std::move(data));
5744 }
Cathal Corbett9c843c32023-01-09 17:51:37 +00005745 catch (InvalidArgumentException&)
Mike Kelly0506ef02023-01-03 16:29:44 +00005746 {
5747 throw ParseException(
5748 fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5749 GetDataTypeName(DataType::Float32),
5750 GetDataTypeName(tensorInfo.GetDataType()),
5751 CHECK_LOCATION().AsString()));
5752 }
Mike Kelly5880b912022-01-28 16:18:54 +00005753 }
5754 else
5755 {
5756 return std::make_pair(ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5757 }
5758}
5759
5760std::pair<armnn::ConstTensor*, std::unique_ptr<float[]>>
5761TfLiteParserImpl::CreateConstTensorPtr(TensorRawPtr tensorPtr, armnn::TensorInfo& inputTensorInfo)
5762{
5763 CHECK_TENSOR_PTR(tensorPtr);
5764 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5765 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5766 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5767
5768 // Make sure isConstant flag is set.
5769 tensorInfo.SetConstant();
5770
5771 if (inputTensorInfo.GetDataType() == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
5772 {
Mike Kelly0506ef02023-01-03 16:29:44 +00005773 try
5774 {
5775 TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5776 std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5777 return std::make_pair(new ConstTensor(constTensorInfo, data.get()), std::move(data));
5778 }
Cathal Corbett9c843c32023-01-09 17:51:37 +00005779 catch (InvalidArgumentException&)
Mike Kelly0506ef02023-01-03 16:29:44 +00005780 {
5781 throw ParseException(
5782 fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5783 GetDataTypeName(DataType::Float32),
5784 GetDataTypeName(tensorInfo.GetDataType()),
5785 CHECK_LOCATION().AsString()));
5786 }
Mike Kelly5880b912022-01-28 16:18:54 +00005787 }
5788 else
5789 {
5790 return std::make_pair(new ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5791 }
5792}
5793
Kevin May7d96b162021-02-03 17:38:41 +00005794BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
5795 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01005796{
5797 CHECK_SUBGRAPH(m_Model, subgraphId);
5798 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005799 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005800 {
5801 if (input.second->name == name)
5802 {
5803 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
Colm Donelan4bc993b2021-11-09 20:39:10 +00005804 auto inputTensorInfo = ToTensorInfo(input.second);
5805 // Input tensors are always treated as constant tensors during network execution.
5806 inputTensorInfo.SetConstant(true);
5807 return std::make_pair(bindingId, inputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01005808 }
5809 }
5810
5811 std::stringstream bindings;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005812 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005813 {
5814 bindings << "'" << input.second->name << "' ";
5815 }
5816
5817 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005818 fmt::format("No input binding found for subgraph:{} and name:{}. "
5819 "Possible inputs are: [{}] {}",
5820 subgraphId,
5821 name,
5822 bindings.str(),
5823 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005824}
5825
Kevin May7d96b162021-02-03 17:38:41 +00005826BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
5827 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01005828{
5829 CHECK_SUBGRAPH(m_Model, subgraphId);
5830 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005831 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005832 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005833 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01005834 if (output.second->name == name)
5835 {
5836 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Mike Kelly377fb212023-01-10 15:55:28 +00005837 std::vector<unsigned int> shape = m_OverriddenOutputShapes.size() > 0 ?
5838 m_OverriddenOutputShapes[i] : AsUnsignedVector(output.second->shape);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005839 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01005840 }
5841 }
5842
5843 std::stringstream bindings;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005844 for (auto const& output : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005845 {
5846 bindings << "'" << output.second->name << "' ";
5847 }
5848
5849 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005850 fmt::format("No output binding found for subgraph:{} and name:{}. "
5851 "Possible outputs are: [{}] {}",
5852 subgraphId,
5853 name,
5854 bindings.str(),
5855 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005856}
5857
/// Returns the number of subgraphs in the loaded model.
size_t TfLiteParserImpl::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}
5862
Kevin May7d96b162021-02-03 17:38:41 +00005863std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01005864{
5865 CHECK_SUBGRAPH(m_Model, subgraphId);
5866 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
5867 std::vector<std::string> result;
5868 result.reserve(inputs.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +00005869 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005870 {
5871 result.push_back(input.second->name);
5872 }
5873 return result;
5874}
5875
Kevin May7d96b162021-02-03 17:38:41 +00005876std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01005877{
5878 CHECK_SUBGRAPH(m_Model, subgraphId);
5879 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
5880 std::vector<std::string> result;
5881 result.reserve(outputs.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +00005882 for (auto const& output : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005883 {
5884 result.push_back(output.second->name);
5885 }
5886 return result;
5887}
5888
/// Returns the parser's version string (the TFLITE_PARSER_VERSION build constant).
const std::string TfLiteParserImpl::GetVersion()
{
    return TFLITE_PARSER_VERSION;
}
5893
// Takes ownership of a float buffer; the other typed pointers stay null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]>&& data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
5901
// Takes ownership of a uint8_t buffer; the other typed pointers stay null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
5909
// Takes ownership of an int8_t buffer; the other typed pointers stay null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}
5917
// Takes ownership of an int32_t buffer; the other typed pointers stay null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
5925
5926} // armnnTfLiteParser