blob: c787212359c22874b062154c092a63b0359fa96b [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kelly04d82292023-01-19 18:29:40 +00002// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfLiteParser/Version.hpp"
Mike Kelly5880b912022-01-28 16:18:54 +00009#include "armnn/LstmParams.hpp"
Matthew Sloyanac001ee2021-02-03 10:43:04 +000010
Sadik Armagand109a4d2020-07-28 10:42:13 +010011#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +000012#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010013#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000014#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010015#include <armnn/Tensor.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000016#include <armnnUtils/TensorUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010017#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010018#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000019#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010020#include <armnn/utility/NumericCast.hpp>
Mike Kelly377fb212023-01-10 15:55:28 +000021#include <armnn/LayerSupport.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010022
23// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000024#include <armnnUtils/Permute.hpp>
Rob Hughes9542f902021-07-14 09:48:54 +010025#include <armnnUtils/Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000026
Sadik Armagan479045b2018-10-01 11:51:37 +010027#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010028#include <VerificationHelpers.hpp>
29
30// The generated code based on the Tf Lite schema:
31#include <schema_generated.h>
32
Matteo Martincighe011d202019-11-28 11:35:47 +000033#include <flatbuffers/flexbuffers.h>
34
James Ward58dec6b2020-09-11 17:32:44 +010035#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010036
telsoa01c577f2c2018-08-31 09:22:23 +010037#include <algorithm>
Matthew Sloyanac001ee2021-02-03 10:43:04 +000038#include <iostream>
telsoa01c577f2c2018-08-31 09:22:23 +010039#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010040#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000041
// Builds a ParseException whose message is "<msg>: <file:line>"; 'msg' may be any
// ostream-insertable expression chain (it is streamed into a temporary stringstream).
#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
    { \
        throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
                                                                            << ": " \
                                                                            << CHECK_LOCATION().AsString()).str()); \
    }
telsoa01c577f2c2018-08-31 09:22:23 +010048
49using namespace armnn;
50using armnn::CheckLocation;
51namespace armnnTfLiteParser
52{
Kevin May7d96b162021-02-03 17:38:41 +000053
// Pimpl facade: the public ITfLiteParser owns a TfLiteParserImpl and forwards every call to it.
ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
    pTfLiteParserImpl(new TfLiteParserImpl(options)) {}

// Out-of-line default so the unique_ptr's deleter sees the complete TfLiteParserImpl type.
ITfLiteParser::~ITfLiteParser() = default;
58
// Factory returning a raw heap-allocated parser; the caller owns it and must pair with Destroy().
ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
{
    return new ITfLiteParser(options);
}
63
// Preferred factory: wraps CreateRaw() in a smart pointer whose deleter is Destroy().
ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
{
    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
}
68
// Deletes a parser created via CreateRaw()/Create(); used as the ITfLiteParserPtr deleter.
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
73
// Loads a .tflite flatbuffer from disk and parses it into an ArmNN network (forwards to impl).
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
}
78
// Parses a TfLite flatbuffer already held in memory into an ArmNN network (forwards to impl).
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
{
    return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
}
83
// Returns the binding id + TensorInfo for a named input tensor of the given subgraph.
BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
}
89
// Returns the binding id + TensorInfo for a named output tensor of the given subgraph.
BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                            const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
}
95
// Number of subgraphs in the most recently parsed model.
size_t ITfLiteParser::GetSubgraphCount() const
{
    return pTfLiteParserImpl->GetSubgraphCount();
}
100
// Names of all input tensors of the given subgraph, in model order.
std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
}
105
// Names of all output tensors of the given subgraph, in model order.
std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
}
110
telsoa01c577f2c2018-08-31 09:22:23 +0100111namespace
112{
jimfly01c25411c2018-11-14 17:47:22 +0000113
// Sentinel operator index: lets CheckModel() validate model/subgraph state without
// requiring a real operator index (see the VIRTUAL_OPERATOR_ID exemption there).
const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
115
Mike Kelly0d77ae12022-01-07 17:42:27 +0000116void CheckSubgraph(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100117 size_t subgraphIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000118 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100119{
120 if (model.get() == nullptr)
121 {
122 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100123 fmt::format("{} was called with invalid (null) model. "
124 "Possible reason is that the model is not yet loaded and Unpack(ed). "
125 "subgraph:{} at {}",
126 location.m_Function,
127 subgraphIndex,
128 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100129 }
130 else if (subgraphIndex >= model->subgraphs.size())
131 {
132 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100133 fmt::format("{} was called with an invalid subgraph index. "
134 "subgraph:{} at {}",
135 location.m_Function,
136 subgraphIndex,
137 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100138 }
139}
140
141#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
142 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
143
Mike Kelly0d77ae12022-01-07 17:42:27 +0000144void CheckModel(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100145 size_t subgraphIndex,
146 size_t operatorIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000147 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100148{
149 if (model.get() == nullptr)
150 {
151 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100152 fmt::format("{} was called with invalid (null) model. "
153 "Possible reason is that the model is not yet loaded and Unpack(ed). "
154 "subgraph:{} operator:{} at {}",
155 location.m_Function,
156 subgraphIndex,
157 operatorIndex,
158 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100159 }
160 else if (subgraphIndex >= model->subgraphs.size())
161 {
162 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100163 fmt::format("{} was called with an invalid subgraph index. "
164 "subgraph:{} operator:{} at {}",
165 location.m_Function,
166 subgraphIndex,
167 operatorIndex,
168 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100169 }
170 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
171 operatorIndex != VIRTUAL_OPERATOR_ID)
172 {
173 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100174 fmt::format("{} was called with an invalid operator index. "
175 "subgraph:{} operator:{} at {}",
176 location.m_Function,
177 subgraphIndex,
178 operatorIndex,
179 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100180 }
181}
182
183#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
184 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
185
Mike Kelly0d77ae12022-01-07 17:42:27 +0000186void CheckTensor(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100187 size_t subgraphIndex,
188 size_t tensorIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000189 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100190{
191 // not checking model, because I assume CHECK_MODEL already run
192 // and checked that. An assert would do.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100193 ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
telsoa01c577f2c2018-08-31 09:22:23 +0100194
195 // also subgraph index should be checked by CHECK_MODEL so
196 // I only add an assert here
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100197 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
telsoa01c577f2c2018-08-31 09:22:23 +0100198
199 // the tensor index is the only one to check here
200 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
201 {
202 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100203 fmt::format("{} was called with an invalid tensor index. "
204 "subgraph:{} tensor:{} at {}",
205 location.m_Function,
206 subgraphIndex,
207 tensorIndex,
208 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100209 }
210}
211
212#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
213 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
214
Kevin May7d96b162021-02-03 17:38:41 +0000215void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000216 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100217{
218 if (rawPtr == nullptr)
219 {
220 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100221 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100222 }
223}
224
225#define CHECK_TENSOR_PTR(TENSOR_PTR) \
226 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
227
Mike Kelly0d77ae12022-01-07 17:42:27 +0000228void CheckBuffer(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100229 size_t bufferIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000230 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100231{
232 if (model.get() == nullptr)
233 {
234 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100235 fmt::format("{} was called with invalid (null) model. "
236 "Possible reason is that the model is not yet loaded and Unpack(ed). "
237 "buffer:{} at {}",
238 location.m_Function,
239 bufferIndex,
240 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100241 }
242 else if (bufferIndex >= model->buffers.size())
243 {
244 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100245 fmt::format("{} was called with an invalid buffer index. "
246 "buffer index:{} at {}",
247 location.m_Function,
248 bufferIndex,
249 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100250 }
251 else if (model->buffers[bufferIndex].get() == nullptr)
252 {
253 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100254 fmt::format("The buffer #{} is null. {}",
255 bufferIndex,
256 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100257 }
258}
259
260#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
261 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
262
// Throws a ParseException if the flatbuffer data buffer is null or too small to hold
// the tensor described by 'tensorInfo'.
// NOTE(review): the first operand compares the element count against the buffer's
// *byte* size; since NumBytes >= NumElements it is subsumed by the second operand.
// Looks intentional-but-redundant upstream — confirm before tightening.
void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo& tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation& location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("BufferPtr is null for buffer:{}. {}",
                        bufferId,
                        location.AsString()));
    }
    else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
            tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        // Report both the expected byte and element counts to ease debugging of
        // truncated or mis-typed constant buffers.
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}
286
Mike Kelly0d77ae12022-01-07 17:42:27 +0000287
// Resolves the builtin operator code for the operator at (subgraphIndex, operatorIndex).
// Indices are assumed to have been validated (e.g. via CHECK_MODEL) by the caller.
tflite::BuiltinOperator GetOpCode(const TfLiteParserImpl::ModelPtr& model, size_t subgraphIndex, size_t operatorIndex)
{
    const auto& operatorPtr = model->subgraphs[subgraphIndex]->operators[operatorIndex];
    auto opcodeIndex = operatorPtr->opcode_index;

// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
#if defined(ARMNN_POST_TFLITE_2_3)
    // Newer schemas may populate either field; taking the max picks whichever is set.
    auto opcode = std::max(model->operator_codes[opcodeIndex]->builtin_code,
            static_cast<tflite::BuiltinOperator>(model->operator_codes[opcodeIndex]->deprecated_builtin_code));
#else
    auto opcode = model->operator_codes[opcodeIndex]->builtin_code;
#endif
    return opcode;
}
302
303std::vector<unsigned int> GetUIntBuffer(armnn::TensorInfo info,
304 const TfLiteParserImpl::ModelPtr& model,
305 size_t bufferIndex)
306{
307 TfLiteParserImpl::BufferRawPtr bufferPtr = TfLiteParserImpl::GetBuffer(model, bufferIndex);
308 std::vector<unsigned int> buffer(info.GetNumElements());
309
310 if (info.GetDataType() == DataType::Signed32)
311 {
312 ::memcpy(buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
313 }
314 else if (info.GetDataType() == DataType::Signed64)
315 {
316 std::vector<uint64_t> uint64Buffer(info.GetNumElements());
317 ::memcpy(uint64Buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
318 buffer.assign(std::begin(uint64Buffer), std::end(uint64Buffer));
319 }
Mike Kelly0506ef02023-01-03 16:29:44 +0000320 else
321 {
322 CheckLocation location = CHECK_LOCATION();
323 throw ParseException(
324 fmt::format("Unsupported data type for uint buffer {}, only Signed 32 or Signed 64 are supported. {}",
325 GetDataTypeName(info.GetDataType()),
326 location.AsString()));
327 }
Mike Kelly0d77ae12022-01-07 17:42:27 +0000328 return buffer;
329}
330
// Convenience wrapper that supplies the caller's source location to CheckBufferSize().
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
333
334bool IsActivationSupported(tflite::ActivationFunctionType activationType)
335{
336 switch(activationType)
337 {
338 case tflite::ActivationFunctionType_NONE:
339 case tflite::ActivationFunctionType_RELU:
340 case tflite::ActivationFunctionType_RELU6:
341 case tflite::ActivationFunctionType_TANH:
342 {
343 return true;
344 }
345 default:
346 {
347 return false;
348 }
349 }
350}
351
// Throws a ParseException naming the offending operator if its fused activation is one
// the parser cannot express (see IsActivationSupported for the supported set).
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                fmt::format("TfLite parser doesn't support fused activation: " \
                            "{}/{} in {} subgraph:{} operator:{} at {}", \
                            OPTION->fused_activation_function, \
                            tflite::EnumNameActivationFunctionType(\
                            OPTION->fused_activation_function), \
                            __func__, \
                            SUBGRAPH_INDEX, \
                            OPERATOR_INDEX, \
                            CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
368
369
Mike Kelly0d77ae12022-01-07 17:42:27 +0000370std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t>& in)
telsoa01c577f2c2018-08-31 09:22:23 +0100371{
372 std::vector<unsigned int> result;
373 result.reserve(in.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +0000374 for (auto& i : in)
telsoa01c577f2c2018-08-31 09:22:23 +0100375 {
mathad01c21025d2021-04-26 10:09:37 +0100376 // If the location of the input data is -1 then the input should be ignored.
377 if (i == -1)
378 {
379 continue;
380 }
telsoa01c577f2c2018-08-31 09:22:23 +0100381 result.push_back(CHECKED_NON_NEGATIVE(i));
382 }
383 return result;
384}
385
// TfLite encodes an omitted optional operand as a negative tensor index; a
// non-negative index means the operand was supplied.
bool IsOptionalOperandPresent(int input)
{
    return !(input < 0);
}
390
telsoa01c577f2c2018-08-31 09:22:23 +0100391void CalcPadding(uint32_t inputSize,
392 uint32_t filterSize,
393 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100394 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100395 uint32_t& paddingFront,
396 uint32_t& paddingBack,
397 tflite::Padding padding)
398{
399 paddingFront = 0;
400 paddingBack = 0;
401 if (padding == tflite::Padding_SAME)
402 {
403 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100404 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
405 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100406 if (temp > inputSize)
407 {
408 paddingFront = (temp - inputSize) / 2;
409 paddingBack = (temp - inputSize) - paddingFront;
410 }
411 }
412}
413
Kevin May7d96b162021-02-03 17:38:41 +0000414armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Finn Williamsb49ed182021-06-29 15:50:08 +0100415 const std::vector<unsigned int>& shape,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100416 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100417{
418 armnn::DataType type;
419 CHECK_TENSOR_PTR(tensorPtr);
420
421 switch (tensorPtr->type)
422 {
423 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000424 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100425 break;
426 case tflite::TensorType_FLOAT32:
427 type = armnn::DataType::Float32;
428 break;
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100429 case tflite::TensorType_FLOAT16:
430 type = armnn::DataType::Float16;
431 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000432 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000433 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000434 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000435 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000436 type = armnn::DataType::QAsymmS8;
437 }
438 else
439 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000440 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000441 type = armnn::DataType::QSymmS8;
442 }
Finn Williamsed66d142019-12-06 09:55:55 +0000443 break;
444 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000445 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000446 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100447 case tflite::TensorType_INT32:
448 type = armnn::DataType::Signed32;
449 break;
Inki Daed4619e22020-09-10 15:33:54 +0900450 case tflite::TensorType_INT64:
451 type = armnn::DataType::Signed64;
452 break;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100453 case tflite::TensorType_BOOL:
454 type = armnn::DataType::Boolean;
455 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100456 default:
457 {
458 CheckLocation location = CHECK_LOCATION();
459 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100460 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
461 tensorPtr->type,
462 tflite::EnumNameTensorType(tensorPtr->type),
463 tensorPtr->name,
464 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100465 }
466 }
Finn Williamsb49ed182021-06-29 15:50:08 +0100467 TensorShape tensorShape;
468
469 std::vector<unsigned int> safeShape = shape;
470 if (shape.size() == 0)
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100471 {
472 safeShape.push_back(1);
Finn Williamsb49ed182021-06-29 15:50:08 +0100473 }
474
475 if (!outputTensor)
476 {
477 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()), safeShape.data());
478 }
479 else
480 {
Rob Hughesd812a312021-08-06 13:10:53 +0100481 size_t shapeSignatureSize = tensorPtr->shape_signature.size();
Finn Williamsb49ed182021-06-29 15:50:08 +0100482
483 // If a shape signature exists we will use that to infer dynamic tensors
484 if (shapeSignatureSize != 0)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100485 {
Finn Williamsb49ed182021-06-29 15:50:08 +0100486 // If the shape is incompatible with the shape signature override the shape
487 if (shapeSignatureSize != shape.size())
488 {
489 safeShape = {};
490
491 for (unsigned int i = 0; i < shapeSignatureSize; ++i)
492 {
493 unsigned int dim = tensorPtr->shape_signature[i] > -1 ?
494 static_cast<unsigned int>(tensorPtr->shape_signature[i]) : 0;
495 safeShape.push_back(dim);
496 }
497 }
498
Rob Hughesd812a312021-08-06 13:10:53 +0100499 std::unique_ptr<bool[]> dimMask = std::make_unique<bool[]>(tensorPtr->shape_signature.size());
Mike Kelly04d82292023-01-19 18:29:40 +0000500 bool batchOnly = true;
Finn Williamsb49ed182021-06-29 15:50:08 +0100501 for (unsigned int i = 0; i < tensorPtr->shape_signature.size(); ++i)
502 {
Mike Kelly04d82292023-01-19 18:29:40 +0000503 dimMask[i] = tensorPtr->shape_signature[i] != -1;
504
505 if (i > 0 && !dimMask[i])
506 {
507 batchOnly = false;
508 }
509 }
510 if (batchOnly)
511 {
512 dimMask[0] = true;
Finn Williamsb49ed182021-06-29 15:50:08 +0100513 }
Rob Hughesd812a312021-08-06 13:10:53 +0100514 tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask.get());
Finn Williamsb49ed182021-06-29 15:50:08 +0100515 }
516 // If there is no shape signature treat the tensor as dynamic if the shape has a size of zero
517 else if (shape.size() == 0)
518 {
519 tensorShape = TensorShape(1, false);
520 }
521 else
522 {
523 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(shape.size()), shape.data());
Sadik Armagand109a4d2020-07-28 10:42:13 +0100524 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100525 }
526
Keith Davisd305e1a2020-01-22 11:57:54 +0000527 float quantizationScale = 0.0f;
528 int32_t quantizationOffset = 0;
529
530 if (tensorPtr->quantization.get())
531 {
532 if (tensorPtr->quantization->scale.size() <= 1)
533 {
534 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
535 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
536
537 if (tensorPtr->quantization->scale.size() == 1)
538 {
539 quantizationScale = tensorPtr->quantization->scale[0];
540 }
541 if (tensorPtr->quantization->zero_point.size() == 1)
542 {
543 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000544 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100545 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000546 }
547
Sadik Armagand109a4d2020-07-28 10:42:13 +0100548 armnn::TensorInfo result(tensorShape,
549 type,
550 quantizationScale,
551 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000552 return result;
553 }
554 else
555 {
556 std::vector<float> quantizationScales;
557 std::vector<int32_t> quantizationOffsets;
558
559 // Scale
560 std::copy(tensorPtr->quantization->scale.begin(),
561 tensorPtr->quantization->scale.end(),
562 std::back_inserter(quantizationScales));
563
Keith Davis0c2eeac2020-02-11 16:51:50 +0000564 // QSymmS8 Per-axis
Sadik Armagand109a4d2020-07-28 10:42:13 +0100565 armnn::TensorInfo result(tensorShape,
566 type,
567 quantizationScales,
Jan Eilers7612bd62021-04-06 17:29:03 +0100568 armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
Keith Davisd305e1a2020-01-22 11:57:54 +0000569 return result;
570 }
571 }
572 else
573 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100574 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000575 type,
576 quantizationScale,
577 quantizationOffset);
578 return result;
579 }
telsoa01c577f2c2018-08-31 09:22:23 +0100580}
581
Kevin May7d96b162021-02-03 17:38:41 +0000582armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Mike Kelly377fb212023-01-10 15:55:28 +0000583 const bool outputTensor = false)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100584{
Mike Kelly0d77ae12022-01-07 17:42:27 +0000585 auto const& dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100586 return ToTensorInfo(tensorPtr, dimensions, outputTensor);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100587}
588
telsoa01c577f2c2018-08-31 09:22:23 +0100589template<typename T>
590std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
Kevin May7d96b162021-02-03 17:38:41 +0000591CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
592 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000593 armnn::TensorInfo& tensorInfo,
594 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100595{
Jan Eilers8eb25602020-03-09 12:13:48 +0000596 IgnoreUnused(tensorPtr);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100597 ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
598 ARMNN_ASSERT_MSG(bufferPtr != nullptr,
James Ward58dec6b2020-09-11 17:32:44 +0100599 fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
telsoa01c577f2c2018-08-31 09:22:23 +0100600
601 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000602
603 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
604 {
605 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000606 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
607 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000608 }
609 else
610 {
611 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
612 }
613
Matthew Sloyan81beae32021-07-13 19:46:11 +0100614 // Make sure isConstant flag is set.
615 tensorInfo.SetConstant();
616
telsoa01c577f2c2018-08-31 09:22:23 +0100617 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
618}
619
telsoa01c577f2c2018-08-31 09:22:23 +0100620armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
621{
622 // generate the binding id by shifting the tensor id by 8 bit
623 // and add the subgraph id, which allows 256 subgraphs
624 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
625}
626
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000627bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
628{
629 const unsigned int actualSize = actual.GetNumDimensions();
630 if (actualSize != expected.size())
631 {
632 return false;
633 }
634
635 for (unsigned int i = 0u; i < actualSize; i++)
636 {
637 if (expected[i] < 0 ||
638 actual[i] != static_cast<unsigned int>(expected[i]))
639 {
640 return false;
641 }
642 }
643
644 return true;
645}
646
Cathal Corbett2b922e22022-09-23 15:49:24 +0100647bool CheckShape(const armnn::TensorShape& actual, const armnn::TensorShape& expected)
648{
649 std::vector<int32_t> expectedVec;
650 for (uint32_t i = 0; i < expected.GetNumDimensions(); i++)
651 {
652 expectedVec.push_back(expected[i]);
653 }
654 return CheckShape(actual, expectedVec);
655}
656
// Validates that two quantized TensorInfos agree in quantized data type and in
// quantization space (scale/offset), throwing InvalidArgumentException with a
// descriptive message otherwise. If either tensor is not quantized the check is a
// no-op. 'descName'/'firstName'/'secondName' are only used to build the message.
void CheckMatchingQuantization(const TensorInfo& first,
                               const TensorInfo& second,
                               const std::string& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (!first.IsQuantized() ||
        !second.IsQuantized())
    {
        // Not a quantized type, ignore the validation
        return;
    }

    DataType firstDataType = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
                                       secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(second.GetQuantizationScale()));
    }
}
691
Mike Kelly377fb212023-01-10 15:55:28 +0000692bool IsDynamic(TfLiteParserImpl::TensorRawPtr tensorPtr)
693{
694 auto shape = tensorPtr->shape;
695
696 if (shape.empty())
697 {
698 return true;
699 }
700 auto shapeSig = tensorPtr->shape_signature;
701
702 if (shapeSig.empty())
703 {
704 return false;
705 }
706
707 for (unsigned int i = 0; i < shapeSig.size() ; ++i)
708 {
709 if (shapeSig[i] == -1)
710 {
711 return true;
712 }
713 }
714 return false;
715}
716
telsoa01c577f2c2018-08-31 09:22:23 +0100717} // <anonymous>
718
Kevin May7d96b162021-02-03 17:38:41 +0000719TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100720: m_Options(options)
721, m_Network(nullptr, nullptr)
Kevin May7d96b162021-02-03 17:38:41 +0000722, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
telsoa01c577f2c2018-08-31 09:22:23 +0100723{
724 // register supported operators
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100725 m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
Kevin May7d96b162021-02-03 17:38:41 +0000726 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100727 m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
728 m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
Kevin May7d96b162021-02-03 17:38:41 +0000729 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
730 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
Samuel Yapfd3ba5a2022-08-24 17:04:34 +0100731 m_ParserFunctions[tflite::BuiltinOperator_BATCH_MATMUL] = &TfLiteParserImpl::ParseBatchMatMul;
mathad01b392e982021-04-07 12:07:30 +0100732 m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
Kevin May7d96b162021-02-03 17:38:41 +0000733 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
734 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
Matthew Sloyan4d217c02021-10-07 11:48:58 +0100735 // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
Cathal Corbett80b4ef02022-05-25 11:21:11 +0100736 #if defined(ARMNN_POST_TFLITE_2_4)
Matthew Sloyaneb5f8102021-10-05 17:31:42 +0100737 m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
Matthew Sloyan4d217c02021-10-07 11:48:58 +0100738 #endif
Kevin May7d96b162021-02-03 17:38:41 +0000739 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
740 m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
741 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
742 m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100743 m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
Kevin May7d96b162021-02-03 17:38:41 +0000744 m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300745 m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000746 m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
Teresa Charlin3ab85482021-06-08 16:59:29 +0100747 m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
Teresa Charlincdbd40b2022-02-25 13:21:55 +0000748 m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV] = &TfLiteParserImpl::ParseFloorDiv;
Kevin May7d96b162021-02-03 17:38:41 +0000749 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
750 m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
Teresa Charlin91a53ea2022-04-25 15:47:29 +0100751 m_ParserFunctions[tflite::BuiltinOperator_GATHER_ND] = &TfLiteParserImpl::ParseGatherNd;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300752 m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
753 m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000754 m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
755 m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300756 m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
757 m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
Mike Kelly31dce2b2021-09-01 21:22:37 +0100758 m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
759 = &TfLiteParserImpl::ParseLocalResponseNormalization;
Teresa Charlin28aa6692022-07-12 11:18:44 +0100760 m_ParserFunctions[tflite::BuiltinOperator_LOG] = &TfLiteParserImpl::ParseLog;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100761 m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
Kevin May7d96b162021-02-03 17:38:41 +0000762 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
Teresa Charlinfd33a692022-06-29 15:35:57 +0100763 m_ParserFunctions[tflite::BuiltinOperator_LOG_SOFTMAX] = &TfLiteParserImpl::ParseLogSoftmax;
Kevin May7d96b162021-02-03 17:38:41 +0000764 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
765 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
766 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
767 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
768 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +0100769 m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
Kevin May7d96b162021-02-03 17:38:41 +0000770 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
771 m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300772 m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000773 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
774 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
Mike Kelly0d77ae12022-01-07 17:42:27 +0000775 m_ParserFunctions[tflite::BuiltinOperator_PADV2] = &TfLiteParserImpl::ParsePad;
Narumol Prangnawaratbfaee6b2021-05-24 18:50:24 +0100776 m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
Kevin May7d96b162021-02-03 17:38:41 +0000777 m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
778 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
779 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
Sadik Armagana2747482021-02-09 10:28:54 +0000780 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
781 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
Teresa Charlin4e3e8312021-08-05 12:34:37 +0100782 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
Kevin May7d96b162021-02-03 17:38:41 +0000783 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
784 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
785 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100786 m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
Teresa Charlinf0fce5b2022-05-04 17:24:43 +0100787 m_ParserFunctions[tflite::BuiltinOperator_SQRT] = &TfLiteParserImpl::ParseSqrt;
Keith Davis0176fd82021-06-01 17:36:32 +0100788 m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
Teresa Charlin28aa6692022-07-12 11:18:44 +0100789 m_ParserFunctions[tflite::BuiltinOperator_SIN] = &TfLiteParserImpl::ParseSin;
Kevin May7d96b162021-02-03 17:38:41 +0000790 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
791 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
792 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
Teresa Charlin2a764ad2023-02-24 18:17:31 +0000793 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_DEPTH] = &TfLiteParserImpl::ParseSpaceToDepth;
Kevin May7d96b162021-02-03 17:38:41 +0000794 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
795 m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
796 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
797 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
798 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
799 m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
800 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
801 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
802 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
Mike Kelly5880b912022-01-28 16:18:54 +0000803 m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
804 = &TfLiteParserImpl::ParseUnidirectionalSequenceLSTM;
Kevin May7d96b162021-02-03 17:38:41 +0000805 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100806
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100807 // register supported custom operators
Kevin May7d96b162021-02-03 17:38:41 +0000808 m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
telsoa01c577f2c2018-08-31 09:22:23 +0100809}
810
Mike Kelly377fb212023-01-10 15:55:28 +0000811armnn::TensorInfo TfLiteParserImpl::InputTensorInfo(size_t subgraphIndex,
812 size_t operatorIndex,
813 int input)
814{
815 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
816 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
817
818 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[input]);
819 auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);
820
821 if (search != m_TensorInfos.end())
822 {
823 return m_TensorInfos[inputId];
824 }
825 else
826 {
827 auto tensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
828 m_TensorInfos.insert({ inputId, tensorInfo });
829 return tensorInfo;
830 }
831}
832
833armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromInputs(size_t subgraphIndex,
834 size_t operatorIndex,
835 armnn::IConnectableLayer* layer,
836 int output,
837 std::vector<int> inputs)
838{
839 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
840 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
841
842 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
843
844 auto outputSearch = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(outputId);
845
846 if (outputSearch != m_TensorInfos.end())
847 {
848 return m_TensorInfos[outputId];
849 }
850
851 const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
852 TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);
853
854 if (IsDynamic(outputTensorPtr))
855 {
856 if (inputs.empty())
857 {
858 for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
859 {
860 inputs.emplace_back(i);
861 }
862 }
863 auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
864 std::vector<armnn::TensorShape> inputShapes;
865
866 for (unsigned int i = 0; i < inputs.size(); ++i)
867 {
868 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[inputs[i]]);
869 auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);
870
871 if (search != m_TensorInfos.end())
872 {
873 auto &inputTensorInfo = m_TensorInfos[inputId];
874 inputShapes.push_back(inputTensorInfo.GetShape());
875 }
876 else
877 {
878 m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
879 auto inputTensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
880 m_TensorInfos.insert({ inputId, inputTensorInfo});
881 inputShapes.push_back(inputTensorInfo.GetShape());
882 }
883 }
884 const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
885 tensor.SetShape(outputShape);
886 }
887 m_TensorInfos.insert({ outputId, tensor});
888 return tensor;
889}
890
891armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromShapes(size_t subgraphIndex,
892 size_t operatorIndex,
893 armnn::IConnectableLayer* layer,
894 int output,
895 std::vector<armnn::TensorShape> inputShapes)
896{
897 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
898 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
899
900 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
901 const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
902 TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);
903
904 if (IsDynamic(outputTensorPtr))
905 {
906 const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
907 tensor.SetShape(outputShape);
908 }
909 m_TensorInfos.insert({ outputId, tensor});
910 return tensor;
911}
912
Kevin May7d96b162021-02-03 17:38:41 +0000913void TfLiteParserImpl::ResetParser()
telsoa01c577f2c2018-08-31 09:22:23 +0100914{
915 m_Network = armnn::INetworkPtr(nullptr, nullptr);
916 m_Model = nullptr;
917 m_SubgraphConnections.clear();
Mike Kelly377fb212023-01-10 15:55:28 +0000918 m_OverriddenOutputShapes.clear();
Mike Kelly5880b912022-01-28 16:18:54 +0000919 m_ConstantsToDequantize.clear();
920 m_ConstantsToBeCreated.clear();
Mike Kelly377fb212023-01-10 15:55:28 +0000921 m_TensorInfos.clear();
telsoa01c577f2c2018-08-31 09:22:23 +0100922}
923
Kevin May7d96b162021-02-03 17:38:41 +0000924INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
telsoa01c577f2c2018-08-31 09:22:23 +0100925{
926 ResetParser();
927 m_Model = LoadModelFromFile(graphFile);
928 return CreateNetworkFromModel();
929}
930
Mike Kelly0d77ae12022-01-07 17:42:27 +0000931INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
telsoa01c577f2c2018-08-31 09:22:23 +0100932{
933 ResetParser();
934 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
935 return CreateNetworkFromModel();
936}
937
Finn Williamsb49ed182021-06-29 15:50:08 +0100938
939armnn::INetworkPtr TfLiteParserImpl::LoadModel(std::unique_ptr<tflite::ModelT> model)
940{
941 ResetParser();
942 m_Model = std::move(model);
943
944 return CreateNetworkFromModel();
945}
946
Kevin May7d96b162021-02-03 17:38:41 +0000947INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
telsoa01c577f2c2018-08-31 09:22:23 +0100948{
Sadik Armagand109a4d2020-07-28 10:42:13 +0100949
950 using NetworkOptions = std::vector<BackendOptions>;
951 NetworkOptions networkOptions = {};
Mike Kelly80512b02022-05-16 23:10:42 +0100952 if (m_Options)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100953 {
Mike Kelly80512b02022-05-16 23:10:42 +0100954 if (m_Options.value().m_InferAndValidate)
955 {
956 BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
957 {
958 { "InferAndValidate", true }
959 });
Sadik Armagand109a4d2020-07-28 10:42:13 +0100960
Mike Kelly80512b02022-05-16 23:10:42 +0100961 networkOptions.push_back(shapeInferenceMethodOption);
962 }
963 if (m_Options.value().m_AllowExpandedDims)
964 {
965 BackendOptions shapeInferenceMethodOption("AllowExpandedDims",
966 {
967 { "AllowExpandedDims", true }
968 });
969
970 networkOptions.push_back(shapeInferenceMethodOption);
971 }
Sadik Armagand109a4d2020-07-28 10:42:13 +0100972 }
Sadik Armagand109a4d2020-07-28 10:42:13 +0100973 m_Network = INetwork::Create(networkOptions);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100974 ARMNN_ASSERT(m_Model.get() != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +0100975
telsoa01c577f2c2018-08-31 09:22:23 +0100976 if (m_Model->subgraphs.size() != 1)
977 {
978 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100979 fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
980 m_Model->subgraphs.size(),
981 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100982 }
983
984 size_t subgraphIndex = 0;
Colm Donelan6350d272020-06-09 16:56:25 +0100985 size_t operatorIndex = 0;
986 try
telsoa01c577f2c2018-08-31 09:22:23 +0100987 {
Colm Donelan6350d272020-06-09 16:56:25 +0100988 for (SubgraphPtr const& subgraph : m_Model->subgraphs)
telsoa01c577f2c2018-08-31 09:22:23 +0100989 {
Mike Kelly377fb212023-01-10 15:55:28 +0000990 SetupInputLayerTensorInfos(subgraphIndex);
991 SetupConstantLayerTensorInfos(subgraphIndex);
992
Colm Donelan6350d272020-06-09 16:56:25 +0100993 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
994 for (OperatorPtr const& op : subgraph->operators)
telsoa01c577f2c2018-08-31 09:22:23 +0100995 {
Colm Donelan6350d272020-06-09 16:56:25 +0100996 auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];
Jim Flynnfca233e2021-09-23 12:16:53 +0100997
998// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
Matthew Sloyan4d217c02021-10-07 11:48:58 +0100999#if defined(ARMNN_POST_TFLITE_2_3)
Jim Flynnfca233e2021-09-23 12:16:53 +01001000 auto builtinCode = std::max(opCodePtr->builtin_code,
1001 static_cast<tflite::BuiltinOperator>(opCodePtr->deprecated_builtin_code));
1002#else
telsoa01c577f2c2018-08-31 09:22:23 +01001003 auto builtinCode = opCodePtr->builtin_code;
Jim Flynnfca233e2021-09-23 12:16:53 +01001004#endif
telsoa01c577f2c2018-08-31 09:22:23 +01001005
1006 if (builtinCode > tflite::BuiltinOperator_MAX)
1007 {
James Ward58dec6b2020-09-11 17:32:44 +01001008 throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
1009 "subgraph:{} operator idx:{}. {}",
1010 builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
1011 operatorIndex, CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01001012 }
1013
1014 // lookup and call the parser function
Colm Donelan6350d272020-06-09 16:56:25 +01001015 auto& parserFunction = m_ParserFunctions[builtinCode];
telsoa01c577f2c2018-08-31 09:22:23 +01001016 (this->*parserFunction)(subgraphIndex, operatorIndex);
Colm Donelan6350d272020-06-09 16:56:25 +01001017 ++operatorIndex;
telsoa01c577f2c2018-08-31 09:22:23 +01001018 }
telsoa01c577f2c2018-08-31 09:22:23 +01001019
Colm Donelan6350d272020-06-09 16:56:25 +01001020 SetupInputLayers(subgraphIndex);
1021 SetupOutputLayers(subgraphIndex);
1022 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001023
Colm Donelan6350d272020-06-09 16:56:25 +01001024 ++subgraphIndex;
1025 operatorIndex = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01001026 }
telsoa01c577f2c2018-08-31 09:22:23 +01001027 }
Colm Donelan6350d272020-06-09 16:56:25 +01001028 catch (const ParseException& e)
telsoa01c577f2c2018-08-31 09:22:23 +01001029 {
Colm Donelan6350d272020-06-09 16:56:25 +01001030 std::stringstream errorString;
1031 errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
1032 << subgraphIndex << " error: " << e.what();
1033 ARMNN_LOG(error) << errorString.str();
1034 std::stringstream errors;
1035 errors << errorString.str() << "\n";
telsoa01c577f2c2018-08-31 09:22:23 +01001036 throw ParseException(errors.str());
1037 }
1038
1039 // establish the connections from the layer outputs to the inputs of the subsequent layers
Colm Donelan6350d272020-06-09 16:56:25 +01001040 for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001041 {
1042 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
1043 {
1044 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
1045 {
1046 for (size_t inputSlotIdx = 0;
1047 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
1048 ++inputSlotIdx)
1049 {
1050 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
1051 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
1052 }
1053 }
1054 }
1055 }
telsoa01c577f2c2018-08-31 09:22:23 +01001056 return std::move(m_Network);
1057}
1058
Mike Kelly0506ef02023-01-03 16:29:44 +00001059bool TfLiteParserImpl::ShouldConstantTensorBeConverted(TfLiteParserImpl::TensorRawPtr tensorPtr,
1060 armnn::DataType inputDataType,
1061 armnn::DataType tensorDataType)
Mike Kelly5880b912022-01-28 16:18:54 +00001062{
Mike Kelly0506ef02023-01-03 16:29:44 +00001063 return (TfLiteParserImpl::IsConstTensor(tensorPtr) && inputDataType == DataType::Float32 &&
1064 (tensorDataType == DataType::QAsymmU8 ||
1065 tensorDataType == DataType::QAsymmS8 ||
1066 tensorDataType == DataType::QSymmS8 ||
1067 tensorDataType == DataType::Signed32 ||
1068 tensorDataType == DataType::Signed64));
Mike Kelly5880b912022-01-28 16:18:54 +00001069}
1070
Kevin May7d96b162021-02-03 17:38:41 +00001071void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
1072 size_t tensorIndex,
1073 armnn::IOutputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +01001074{
1075 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001076 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
1077 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001078
1079 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
1080
Nikhil Rajd4d1c312022-08-03 18:20:59 +01001081 if (slot->GetOwningIConnectableLayer().GetType() != LayerType::Constant)
telsoa01c577f2c2018-08-31 09:22:23 +01001082 {
telsoa01c577f2c2018-08-31 09:22:23 +01001083
Nikhil Rajd4d1c312022-08-03 18:20:59 +01001084 // assuming there is only one producer for that tensor
1085 if (tensorSlots.outputSlot != nullptr)
1086 {
1087 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
1088 "subgraph:{} tensor:{} {}",
1089 subgraphIndex,
1090 tensorIndex,
1091 CHECK_LOCATION().AsString()));
1092 }
1093 }
telsoa01c577f2c2018-08-31 09:22:23 +01001094 tensorSlots.outputSlot = slot;
1095}
1096
Kevin May7d96b162021-02-03 17:38:41 +00001097void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
1098 size_t tensorIndex,
1099 armnn::IInputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +01001100{
1101 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001102 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
1103 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001104
Finn Williamsd4fa5452021-03-01 12:31:41 +00001105 TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01001106 tensorSlots.inputSlots.push_back(slot);
1107}
1108
Kevin May7d96b162021-02-03 17:38:41 +00001109void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001110{
1111 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1112
1113 // NOTE: By default we presume the custom operator is not supported
Kevin May7d96b162021-02-03 17:38:41 +00001114 auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001115
1116 // Identify custom code defined for custom operator
1117 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1118 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
1119
Mike Kelly377fb212023-01-10 15:55:28 +00001120 // Find parser function that corresponds to custom code (if any)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001121 auto iterator = m_CustomParserFunctions.find(customCode);
1122 if (iterator != m_CustomParserFunctions.end())
1123 {
1124 customParserFunction = iterator->second;
1125 }
1126
1127 // Run parser function
1128 (this->*customParserFunction)(subgraphIndex, operatorIndex);
1129}
1130
Kevin May7d96b162021-02-03 17:38:41 +00001131void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001132{
1133 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001134
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001135 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1136
1137 auto opcodeIndex = operatorPtr->opcode_index;
Jim Flynnfca233e2021-09-23 12:16:53 +01001138
1139// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
Matthew Sloyan4d217c02021-10-07 11:48:58 +01001140#if defined(ARMNN_POST_TFLITE_2_3)
Jim Flynnfca233e2021-09-23 12:16:53 +01001141 auto opcode = std::max(m_Model->operator_codes[opcodeIndex]->builtin_code,
1142 static_cast<tflite::BuiltinOperator>(m_Model->operator_codes[opcodeIndex]->deprecated_builtin_code));
1143#else
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001144 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
Jim Flynnfca233e2021-09-23 12:16:53 +01001145#endif
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001146
1147 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
1148 {
1149 // Do not add StandInLayer, throw ParseException instead
1150 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001151 fmt::format("Operator not supported. "
1152 "subgraph:{} operator:{} "
1153 "opcode_index:{} opcode:{} / {} {}",
1154 subgraphIndex,
1155 operatorIndex,
1156 opcodeIndex,
1157 opcode,
1158 tflite::EnumNameBuiltinOperator(opcode),
1159 CHECK_LOCATION().AsString()));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001160 }
1161
1162 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1163 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1164
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001165 const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
1166 const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001167
1168 StandInDescriptor descriptor(numInputs, numOutputs);
James Ward58dec6b2020-09-11 17:32:44 +01001169 auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001170
1171 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
1172 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001173 ARMNN_ASSERT(layer != nullptr);
1174
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001175 for (unsigned int i = 0u; i < numOutputs; ++i)
1176 {
Mike Kelly04d82292023-01-19 18:29:40 +00001177 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[0], true));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001178 }
1179
1180 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1181 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1182
1183 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
1184 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +01001185}
1186
mathad01b392e982021-04-07 12:07:30 +01001187void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
1188{
1189 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1190
1191 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1192 CHECK_VALID_SIZE(inputs.size(), 1);
1193 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1194 CHECK_VALID_SIZE(outputs.size(), 1);
1195
1196 auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
1197
1198 IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
1199 ARMNN_ASSERT(layer != nullptr);
1200
Mike Kelly377fb212023-01-10 15:55:28 +00001201 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
mathad01b392e982021-04-07 12:07:30 +01001202 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1203
1204 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1205 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1206
1207 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1208 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1209}
1210
Kevin May7d96b162021-02-03 17:38:41 +00001211void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001212{
1213 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1214
Mike Kelly0d77ae12022-01-07 17:42:27 +00001215 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1216 const auto* options = operatorPtr->builtin_options.AsConv2DOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01001217
1218 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1219
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001220 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1221 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1222 CHECK_VALID_SIZE(outputs.size(), 1);
1223
telsoa01c577f2c2018-08-31 09:22:23 +01001224 Convolution2dDescriptor desc;
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001225 inputs.size() == 3 ?
1226 desc.m_BiasEnabled = true : desc.m_BiasEnabled = false;
telsoa01c577f2c2018-08-31 09:22:23 +01001227 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1228 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +00001229 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +01001230 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1231 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +00001232
Mike Kelly377fb212023-01-10 15:55:28 +00001233 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1234 armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001235
1236 // assuming input is NHWC
1237 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001238 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001239
1240 // assuming the filter is OHWI : Output, H, W, Input
1241 // which is essentially the same as NHWC
1242 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001243 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001244
Pablo Tellof0bd6832019-04-26 17:58:13 +01001245 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1246 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1247 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1248 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +01001249
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001250 // Add the first input and weights tensor to the registration list.
1251 // The constant weights will be added by SetupConstantLayers.
1252 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1253 std::vector<unsigned int> tensorIndexesToRegister = { inputTensorIndexes[0], inputTensorIndexes[1] };
telsoa01c577f2c2018-08-31 09:22:23 +01001254
James Ward58dec6b2020-09-11 17:32:44 +01001255 auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001256 armnn::IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, layerName.c_str());
telsoa01c577f2c2018-08-31 09:22:23 +01001257
Mike Kelly0506ef02023-01-03 16:29:44 +00001258 if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
telsoa01c577f2c2018-08-31 09:22:23 +01001259 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001260 m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
telsoa01c577f2c2018-08-31 09:22:23 +01001261 }
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001262
1263 if (desc.m_BiasEnabled)
telsoa01c577f2c2018-08-31 09:22:23 +01001264 {
Mike Kelly377fb212023-01-10 15:55:28 +00001265 armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001266
1267 // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
1268 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
1269
Mike Kelly0506ef02023-01-03 16:29:44 +00001270 if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001271 {
1272 m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
1273 }
telsoa01c577f2c2018-08-31 09:22:23 +01001274 }
1275
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001276 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001277
Mike Kelly377fb212023-01-10 15:55:28 +00001278 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
jimfly01c25411c2018-11-14 17:47:22 +00001279 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001280
1281 // register the input connection slots for the layer, connections are made after all layers have been created
1282 // only the tensors for the inputs are relevant, exclude the const tensors
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001283 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
telsoa01c577f2c2018-08-31 09:22:23 +01001284
jimfly01c25411c2018-11-14 17:47:22 +00001285 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +01001286 // register the output connection slots for the layer, connections are made after all layers have been created
1287 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001288 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, { outputTensorIndexes[0] });
telsoa01c577f2c2018-08-31 09:22:23 +01001289}
1290
// Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
#if defined(ARMNN_POST_TFLITE_2_4)
/// Parses a TfLite CONV_3D operator and adds an ArmNN Convolution3d layer to the network.
/// Weights (input 1) and optional bias (input 2) are registered as inputs; the constant
/// tensors themselves are materialised later by SetupConstantLayers.
/// @param subgraphIndex  index of the subgraph containing the operator
/// @param operatorIndex  index of the operator within the subgraph
void TfLiteParserImpl::ParseConv3D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsConv3DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution3dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_DataLayout = armnn::DataLayout::NDHWC;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_StrideZ = CHECKED_NON_NEGATIVE(options->stride_d);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
    desc.m_DilationZ = CHECKED_NON_NEGATIVE(options->dilation_d_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Assuming input is NDHWC
    unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
    unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    CalcPadding(inputDepth, filterDepth, desc.m_StrideZ,
                desc.m_DilationZ, desc.m_PadFront, desc.m_PadBack, options->padding);
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // NOTE: a previously-created ConstTensor for the weights (CreateConstTensorNonPermuted) was
    // never used here — the weights are added as a constant layer by SetupConstantLayers — so the
    // wasteful unused copy has been removed.

    auto layerName = fmt::format("Conv3D:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input and weights tensor to the registration list.
    // The constant weights will be added by SetupConstantLayers.
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;

        // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
    }

    armnn::IConnectableLayer* layer = m_Network->AddConvolution3dLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register the input connection slots for the layer, connections are made after all layers have been created
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
#endif
Matthew Sloyaneb5f8102021-10-05 17:31:42 +01001370
Kevin May7d96b162021-02-03 17:38:41 +00001371void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001372{
1373 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1374
Mike Kelly0d77ae12022-01-07 17:42:27 +00001375 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1376 const auto* options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01001377
1378 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1379
1380 DepthwiseConvolution2dDescriptor desc;
telsoa01c577f2c2018-08-31 09:22:23 +01001381 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1382 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +00001383 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matthew Jacksond6a9dee2019-07-22 13:53:24 +01001384 CHECKED_NON_NEGATIVE(options->depth_multiplier);
telsoa01c577f2c2018-08-31 09:22:23 +01001385
1386 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1387 CHECK_VALID_SIZE(inputs.size(), 2, 3);
Cathal Corbett06902652022-04-14 17:55:11 +01001388 if (inputs.size() == 3)
1389 {
1390 desc.m_BiasEnabled = true;
1391 }
1392
telsoa01c577f2c2018-08-31 09:22:23 +01001393 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1394 CHECK_VALID_SIZE(outputs.size(), 1);
Pablo Tellof0bd6832019-04-26 17:58:13 +01001395 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1396 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +00001397
Mike Kelly377fb212023-01-10 15:55:28 +00001398 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1399 armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001400
Matteo Martincigh747ef822018-12-18 09:26:39 +00001401 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +01001402 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1403 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +00001404
1405 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +01001406 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1407 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1408
Pablo Tellof0bd6832019-04-26 17:58:13 +01001409 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1410 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1411 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1412 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +01001413
Jan Eilers53ef7952021-06-02 12:01:25 +01001414 // ArmNN uses the same filter tensor layout at TfLite [1, H, W, O] no need for any permutation
James Ward58dec6b2020-09-11 17:32:44 +01001415 auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001416
Cathal Corbett06902652022-04-14 17:55:11 +01001417 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1418 // Add the first input and weights tensor to the registration list.
1419 // The constant weights will be added by SetupConstantLayers.
1420 std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};
1421
1422 armnn::IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, layerName.c_str());
1423
1424 if (desc.m_BiasEnabled)
telsoa01c577f2c2018-08-31 09:22:23 +01001425 {
1426 desc.m_BiasEnabled = true;
Mike Kelly377fb212023-01-10 15:55:28 +00001427 TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Cathal Corbett06902652022-04-14 17:55:11 +01001428
1429 // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
1430 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
telsoa01c577f2c2018-08-31 09:22:23 +01001431 }
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001432 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001433
Mike Kelly377fb212023-01-10 15:55:28 +00001434 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
jimfly01c25411c2018-11-14 17:47:22 +00001435 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001436
1437 // register the input connection slots for the layer, connections are made after all layers have been created
1438 // only the tensors for the inputs are relevant, exclude the const tensors
Cathal Corbett06902652022-04-14 17:55:11 +01001439 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
telsoa01c577f2c2018-08-31 09:22:23 +01001440
jimfly01c25411c2018-11-14 17:47:22 +00001441 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +01001442 // register the output connection slots for the layer, connections are made after all layers have been created
1443 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1444 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1445}
1446
Kevin May7d96b162021-02-03 17:38:41 +00001447void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsed66d142019-12-06 09:55:55 +00001448{
1449 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1450
1451 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1452 CHECK_VALID_SIZE(inputs.size(), 1);
1453
1454 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1455 CHECK_VALID_SIZE(outputs.size(), 1);
1456
James Ward58dec6b2020-09-11 17:32:44 +01001457 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +00001458
1459 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001460 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +00001461
Mike Kelly377fb212023-01-10 15:55:28 +00001462 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Finn Williamsed66d142019-12-06 09:55:55 +00001463 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1464
1465 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1466 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1467
1468 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1469 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1470}
1471
Teresa Charlin3ab85482021-06-08 16:59:29 +01001472void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorIndex)
1473{
1474 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1475
1476 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1477 CHECK_VALID_SIZE(inputs.size(), 2);
1478
1479 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1480 CHECK_VALID_SIZE(outputs.size(), 1);
1481
1482 auto layerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
1483
Mike Kelly377fb212023-01-10 15:55:28 +00001484 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001485 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1486
1487 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1488
1489 ReshapeDescriptor reshapeDesc;
Finn Williamsb49ed182021-06-29 15:50:08 +01001490
1491 if (outputTensorInfo.GetShape().AreAllDimensionsSpecified())
1492 {
1493 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1494 }
1495 else
1496 {
1497 int32_t axis = inputs[1]->shape[0];
1498
1499 int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
1500
1501 if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
1502 {
1503 throw ParseException("axis must be in range [0 - (inputDimSize + 1), inputDimSize] inclusive");
1504 }
1505
1506 if(axis < 0)
1507 {
1508 axis = inputDimSize + axis + 1;
1509 }
1510
Rob Hughesd812a312021-08-06 13:10:53 +01001511 std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
Finn Williamsb49ed182021-06-29 15:50:08 +01001512 unsigned int inputShapeIndex = 0;
1513 for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
1514 {
1515 if (i == static_cast<unsigned int>(axis))
1516 {
1517 shape[i] = 1;
1518 }
1519 else
1520 {
1521 shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
1522 ++inputShapeIndex;
1523 }
1524 }
1525
Rob Hughesd812a312021-08-06 13:10:53 +01001526 reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data());
Finn Williamsb49ed182021-06-29 15:50:08 +01001527 }
Teresa Charlin3ab85482021-06-08 16:59:29 +01001528
1529 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1530 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001531
1532 reshapeDesc.m_TargetShape = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}).GetShape();
1533 outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
1534
Teresa Charlin3ab85482021-06-08 16:59:29 +01001535 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1536
1537 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1538 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1539
1540 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1541 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1542}
1543
Kevin May7d96b162021-02-03 17:38:41 +00001544void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
Keith Davis4cd29a02019-09-09 14:49:20 +01001545{
1546 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1547
1548 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001549 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001550
1551 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1552 CHECK_VALID_SIZE(outputs.size(), 1);
1553
James Ward58dec6b2020-09-11 17:32:44 +01001554 auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly08759e22020-03-02 11:41:31 +00001555 TransposeDescriptor desc;
Keith Davis4cd29a02019-09-09 14:49:20 +01001556
josh minorba424d22019-11-13 10:55:17 -06001557 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001558 {
Mike Kelly377fb212023-01-10 15:55:28 +00001559 armnn::TensorInfo permuteTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Kevin May85d92602019-09-27 17:21:06 +01001560 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001561 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1562 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001563 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
Mike Kelly08759e22020-03-02 11:41:31 +00001564 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
Kevin May85d92602019-09-27 17:21:06 +01001565
Mike Kelly08759e22020-03-02 11:41:31 +00001566 desc = TransposeDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001567 }
Mike Kelly377fb212023-01-10 15:55:28 +00001568 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Keith Davis4cd29a02019-09-09 14:49:20 +01001569
James Conroy05102392020-06-24 15:39:55 +01001570 IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001571 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001572
1573 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1574 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Keith Davis4cd29a02019-09-09 14:49:20 +01001575 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1576
1577 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1578 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1579
1580 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1581 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1582}
1583
/// Parses a TfLite TRANSPOSE_CONV operator and adds an ArmNN TransposeConvolution2d layer.
/// TfLite input ordering here: 0 = output shape tensor, 1 = weights, 2 = feature-map input,
/// 3 = optional bias. Weights and bias are passed to the layer as ConstTensors directly
/// (unlike Conv2D, they are not deferred to SetupConstantLayers).
void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // A fourth input, when present, is the bias.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    if (inputs.size() == 4)
    {
        desc.m_BiasEnabled = true;
    }
    else
    {
        CHECK_VALID_SIZE(inputs.size(), 3);
    }

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // This block determines the output shape of the transpose convolution. If the output shape tensor ptr is not null
    // And the tensor is a constant, we can access the data at load time and set the output shape of the
    // layer. If this is not constant, We do not have access to the shape data, so we have to use
    // infer output shape and skip this code block.
    if (inputs[0] && IsConstTensor(inputs[0]))
    {
        armnn::TensorInfo tensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
        std::vector<int> output_shape(tensorInfo.GetNumElements());

        // Only Signed32 and QAsymmU8 output-shape tensors are decoded; for any other data type
        // output_shape stays zero-initialised yet m_OutputShapeEnabled is still set below —
        // NOTE(review): presumably other types cannot occur here, but worth confirming.
        if (tensorInfo.GetDataType() == DataType::Signed32)
        {
            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
        }
        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
        {
            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
            {
                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
            }
        }
        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : output_shape)
        {
            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        desc.m_OutputShapeEnabled = true;
    }
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Compute the explicit padding that corresponds to the TfLite padding mode.
    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    // Weights are created as a ConstTensor (converted to the input's data type if needed).
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);

    if (desc.m_BiasEnabled)
    {
        auto biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
        auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo, inputTensorInfo.GetDataType());
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasConstTensor.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    // Output shape is inferred from inputs 2 (feature map) and 1 (weights).
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0 , { 2, 1 });
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1697
// Parses a TfLite AVERAGE_POOL_2D operator by delegating to the shared pooling
// handler with the Average algorithm.
void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1702
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001703void TfLiteParserImpl::ParseBatchMatMul(size_t subgraphIndex, size_t operatorIndex)
1704{
1705 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1706
1707 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1708 CHECK_VALID_SIZE(inputs.size(), 2);
1709
1710 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1711 CHECK_VALID_SIZE(outputs.size(), 1);
1712
1713 auto layerName = fmt::format("BatchMatMul:{}:{}", subgraphIndex, operatorIndex);
1714
Mike Kelly377fb212023-01-10 15:55:28 +00001715 TensorInfo inputXTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1716 TensorInfo inputYTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001717
1718 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1719 const auto* options = operatorPtr->builtin_options.AsBatchMatMulOptions();
1720
Teresa Charlinbc37a6b2022-09-22 10:12:58 +01001721 // Adjoint in tensorflow lite performs transpose operation
1722 BatchMatMulDescriptor descriptor(options->adj_x,
1723 options->adj_y,
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001724 false,
Teresa Charlinbc37a6b2022-09-22 10:12:58 +01001725 false);
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001726 // Arbitrary DataLayout
1727
1728 IConnectableLayer* layer = m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
1729 ARMNN_ASSERT(layer != nullptr);
1730
Mike Kelly377fb212023-01-10 15:55:28 +00001731 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001732 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1733
1734 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1735 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1736
1737 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1738 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1739}
1740
Kevin May7d96b162021-02-03 17:38:41 +00001741void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001742{
1743 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1744
1745 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1746 CHECK_VALID_SIZE(inputs.size(), 3);
1747
1748 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1749 CHECK_VALID_SIZE(outputs.size(), 1);
1750
Mike Kelly377fb212023-01-10 15:55:28 +00001751 armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001752 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1753
Mike Kelly377fb212023-01-10 15:55:28 +00001754 armnn::TensorInfo cropsTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001755 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1756
1757 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1758 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1759
1760 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1761 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1762
1763 size_t step = 2;
1764 std::vector<std::pair<unsigned int, unsigned int>> crops;
1765 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1766 {
1767 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1768 }
1769
1770 armnn::BatchToSpaceNdDescriptor desc;
1771 desc.m_BlockShape = blockShape;
1772 desc.m_Crops = crops;
1773 desc.m_DataLayout = armnn::DataLayout::NHWC;
1774
James Ward58dec6b2020-09-11 17:32:44 +01001775 auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001776
Mike Kelly377fb212023-01-10 15:55:28 +00001777 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
James Conroy05102392020-06-24 15:39:55 +01001778
1779 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1780 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001781
1782 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1783 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001784 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1785
1786 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1787 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1788
1789 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1790 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1791}
1792
Kevin May7d96b162021-02-03 17:38:41 +00001793void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001794{
1795 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1796
1797 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1798 CHECK_VALID_SIZE(inputs.size(), 1);
1799
1800 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1801 CHECK_VALID_SIZE(outputs.size(), 1);
1802
1803 L2NormalizationDescriptor desc;
1804 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001805 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001806 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1807
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001808 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001809
Mike Kelly377fb212023-01-10 15:55:28 +00001810 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Jackson28c94572019-07-18 10:47:03 +01001811 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1812
1813 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1814 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1815
1816 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1817 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1818}
1819
// Parses a TfLite MAX_POOL_2D operator by delegating to the shared pooling
// handler with the Max pooling algorithm selected.
void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1824
Kevin May7d96b162021-02-03 17:38:41 +00001825void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001826{
1827 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1828
1829 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1830 CHECK_VALID_SIZE(inputs.size(), 2);
1831
1832 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1833 CHECK_VALID_SIZE(outputs.size(), 1);
1834
James Ward58dec6b2020-09-11 17:32:44 +01001835 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001836
Mike Kelly377fb212023-01-10 15:55:28 +00001837 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1838 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
James Conroy05102392020-06-24 15:39:55 +01001839 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001840
Mike Kelly3ec30772023-03-08 13:47:17 +00001841 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Maximum, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001842 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001843
1844 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
1845 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001846 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1847
1848 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001849 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001850
1851 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1852 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1853}
1854
Kevin May7d96b162021-02-03 17:38:41 +00001855void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001856{
1857 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1858
1859 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1860 CHECK_VALID_SIZE(inputs.size(), 2);
1861
1862 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1863 CHECK_VALID_SIZE(outputs.size(), 1);
1864
James Ward58dec6b2020-09-11 17:32:44 +01001865 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001866
Mike Kelly377fb212023-01-10 15:55:28 +00001867 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1868 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
James Conroy05102392020-06-24 15:39:55 +01001869 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001870
Mike Kelly3ec30772023-03-08 13:47:17 +00001871 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Minimum, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001872 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001873
1874 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
1875 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001876 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1877
1878 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001879 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001880
1881 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1882 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1883}
1884
Kevin May7d96b162021-02-03 17:38:41 +00001885void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
1886 size_t operatorIndex,
1887 PoolingAlgorithm algorithm)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001888{
1889 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1890
Mike Kelly0d77ae12022-01-07 17:42:27 +00001891 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1892 const auto* options = operatorPtr->builtin_options.AsPool2DOptions();
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001893
1894 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1895
1896 std::string layerName;
1897
1898 switch (algorithm)
1899 {
1900 case PoolingAlgorithm::Average:
1901 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001902 fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001903 break;
1904 case PoolingAlgorithm::Max:
1905 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001906 fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001907 break;
1908 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001909 ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001910 }
1911
1912 Pooling2dDescriptor desc;
1913
1914 desc.m_PoolType = algorithm;
1915 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1916 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1917 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1918 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1919 desc.m_PaddingMethod = PaddingMethod::Exclude;
1920 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001921 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001922
1923 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1924 CHECK_VALID_SIZE(inputs.size(), 1);
Mike Kelly377fb212023-01-10 15:55:28 +00001925 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001926
1927 // assuming input is NHWC
1928 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1929 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1930
Pablo Tellof0bd6832019-04-26 17:58:13 +01001931 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1932 desc.m_PadTop, desc.m_PadBottom, options->padding);
1933 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1934 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001935
1936 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1937 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001938
James Conroy05102392020-06-24 15:39:55 +01001939 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1940 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001941
1942 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1943 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
jimfly01c25411c2018-11-14 17:47:22 +00001944 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001945
1946 // register the input connection slots for the layer, connections are made after all layers have been created
1947 // only the tensors for the inputs are relevant, exclude the const tensors
1948 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001949 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001950
jimfly01c25411c2018-11-14 17:47:22 +00001951 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001952 // register the output connection slots for the layer, connections are made after all layers have been created
1953 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1954 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1955}
1956
Kevin May7d96b162021-02-03 17:38:41 +00001957void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
josh minorba424d22019-11-13 10:55:17 -06001958{
1959 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1960
1961 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1962 CHECK_VALID_SIZE(inputs.size(), 3);
1963 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1964 CHECK_VALID_SIZE(outputs.size(), 1);
1965
1966 SliceDescriptor desc;
1967
1968 // set begin tensor info for slice descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00001969 armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
josh minorba424d22019-11-13 10:55:17 -06001970 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1971
1972 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1973 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1974
1975 // set size tensor info for slice descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00001976 armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
josh minorba424d22019-11-13 10:55:17 -06001977 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1978
Cathal Corbettde33dda2022-09-20 16:40:09 +01001979 std::vector<int> signedSize(sizeTensorInfo.GetNumElements(), 1);
1980
1981 // if size buffer data is not specified, all contents of size vector remain as values of 1
1982 if (sizeBufferPtr->data.data())
1983 {
1984 ::memcpy(signedSize.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1985 }
1986
josh minorba424d22019-11-13 10:55:17 -06001987 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
Mike Kelly377fb212023-01-10 15:55:28 +00001988 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly7ba84d62021-09-10 15:27:19 +01001989
1990 for (unsigned int i = 0; i < signedSize.size(); ++i)
1991 {
1992 int signedValue = signedSize[i];
Jim Flynnfca233e2021-09-23 12:16:53 +01001993
Mike Kelly7ba84d62021-09-10 15:27:19 +01001994 if (signedValue < -1 || signedValue > static_cast<int>(inputTensorInfo.GetShape()[i] - begin[i]))
1995 {
1996 throw ParseException(fmt::format("Invalid value for size {} size must be in range "
1997 "[-1, inputDimSize - begin] [-1, {}] inclusive {}",
1998 signedValue,
1999 inputTensorInfo.GetShape()[i] - begin[i],
2000 CHECK_LOCATION().AsString()));
2001 }
2002
2003 if (signedValue == -1)
2004 {
2005 size[i] = inputTensorInfo.GetShape()[i] - begin[i];
2006 }
2007 else
2008 {
2009 size[i] = static_cast<unsigned int>(signedValue);
2010 }
2011 }
2012
josh minorba424d22019-11-13 10:55:17 -06002013 desc = SliceDescriptor(begin, size);
2014
James Ward58dec6b2020-09-11 17:32:44 +01002015 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06002016
James Conroy05102392020-06-24 15:39:55 +01002017 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
Mike Kelly377fb212023-01-10 15:55:28 +00002018
2019 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2020 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
josh minorba424d22019-11-13 10:55:17 -06002021 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2022
2023 // register the input connection slots for the layer, connections are made after all layers have been created
2024 // only the tensors for the inputs are relevant, exclude the const tensors
2025 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2026 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2027
2028 // register the output connection slots for the layer, connections are made after all layers have been created
2029 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2030 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2031}
2032
Kevin May7d96b162021-02-03 17:38:41 +00002033void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01002034{
2035 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00002036 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2037 const auto* options = operatorPtr->builtin_options.AsSoftmaxOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01002038
2039 SoftmaxDescriptor desc;
2040 desc.m_Beta = options->beta;
2041
2042 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2043 CHECK_VALID_SIZE(inputs.size(), 1);
2044 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2045 CHECK_VALID_SIZE(outputs.size(), 1);
2046
James Ward58dec6b2020-09-11 17:32:44 +01002047 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01002048 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
2049
Mike Kelly377fb212023-01-10 15:55:28 +00002050 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
telsoa01c577f2c2018-08-31 09:22:23 +01002051 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2052
2053 // register the input connection slots for the layer, connections are made after all layers have been created
2054 // only the tensors for the inputs are relevant, exclude the const tensors
2055 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2056 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2057
2058 // register the output connection slots for the layer, connections are made after all layers have been created
2059 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2060 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2061}
2062
Teresa Charlinfd33a692022-06-29 15:35:57 +01002063void TfLiteParserImpl::ParseLogSoftmax(size_t subgraphIndex, size_t operatorIndex)
2064{
2065 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2066
2067 LogSoftmaxDescriptor desc;
2068
2069 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2070 CHECK_VALID_SIZE(inputs.size(), 1);
2071 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2072 CHECK_VALID_SIZE(outputs.size(), 1);
2073
2074 auto layerName = fmt::format("LogSoftmax:{}:{}", subgraphIndex, operatorIndex);
2075 IConnectableLayer* const layer = m_Network->AddLogSoftmaxLayer(desc, layerName.c_str());
2076
Mike Kelly377fb212023-01-10 15:55:28 +00002077 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Teresa Charlinfd33a692022-06-29 15:35:57 +01002078 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2079
2080 // register the input connection slots for the layer, connections are made after all layers have been created
2081 // only the tensors for the inputs are relevant, exclude the const tensors
2082 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2083 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2084
2085 // register the output connection slots for the layer, connections are made after all layers have been created
2086 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2087 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2088}
2089
// Parses a TfLite SPACE_TO_BATCH_ND operator.
// Inputs: [data, blockShape, padList]; blockShape and padList are read directly
// from the model's constant buffers — presumably they are constant tensors in
// any valid model (no null check is performed here; TODO confirm upstream
// validation guarantees this).
void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Input 1: per-dimension block sizes.
    armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    // Input 2: flat list of (before, after) padding amounts, two entries per dimension.
    armnn::TensorInfo padListTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    // Re-pair the flat pad list into (before, after) tuples for the descriptor.
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);

    TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Output shape is inferred from the data input; quantization must match it.
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register only the data input; the constant inputs are baked into the descriptor.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2141
Teresa Charlin2a764ad2023-02-24 18:17:31 +00002142void TfLiteParserImpl::ParseSpaceToDepth(size_t subgraphIndex, size_t operatorIndex)
2143{
2144 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2145
2146 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2147 CHECK_VALID_SIZE(inputs.size(), 1);
2148 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2149 CHECK_VALID_SIZE(outputs.size(), 1);
2150
2151 armnn::SpaceToDepthDescriptor descriptor;
2152
2153 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2154 const auto* options = operatorPtr->builtin_options.AsSpaceToDepthOptions();
2155 auto blockSize = options->block_size;
2156 if (blockSize < 2)
2157 {
2158 throw ParseException(
2159 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
2160 blockSize,
2161 CHECK_LOCATION().AsString()));
2162 }
2163 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
2164
2165 auto layerName = fmt::format("SpaceToDepth:{}:{}", subgraphIndex, operatorIndex);
2166 IConnectableLayer* layer = m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str());
2167 ARMNN_ASSERT(layer != nullptr);
2168 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2169 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2170
2171 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2172 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2173
2174 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2175 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2176}
2177
Teresa Charlin3ab85482021-06-08 16:59:29 +01002178armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
Mike Kelly0d77ae12022-01-07 17:42:27 +00002179 const armnn::TensorInfo& inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01002180{
Teresa Charlin3ab85482021-06-08 16:59:29 +01002181 CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
telsoa01c577f2c2018-08-31 09:22:23 +01002182 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2183
2184 if (inputTensorInfo.GetNumDimensions() > 4)
2185 {
2186 std::stringstream ss;
2187 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2188 << " shape:" << inputTensorInfo.GetShape() << " "
2189 << CHECK_LOCATION().AsString();
2190 throw ParseException(ss.str());
2191 }
2192
2193 if (squeezeDims.empty())
2194 {
2195 squeezeDims.assign(dimensionSequence,
2196 dimensionSequence+inputTensorInfo.GetNumDimensions());
2197 }
2198
2199 std::vector<uint32_t> outputDims;
2200 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2201 {
2202 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2203 auto currentDimension = inputTensorInfo.GetShape()[i];
2204 if (skipSqueeze || currentDimension != 1)
2205 {
2206 outputDims.push_back(currentDimension);
2207 }
2208 }
2209
2210 if (outputDims.size() > 4)
2211 {
2212 std::stringstream ss;
2213 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2214 << " shape:" << inputTensorInfo.GetShape() << " "
2215 << CHECK_LOCATION().AsString();
2216 throw ParseException(ss.str());
2217 }
2218
2219 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2220 outputDims.data());
2221
2222 // we need to preserve the tensor type and the quantization data as well
2223 TensorInfo outTensorInfo = inputTensorInfo;
2224 outTensorInfo.SetShape(outShape);
2225
2226 return outTensorInfo;
2227}
2228
Keith Davis0176fd82021-06-01 17:36:32 +01002229void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
2230{
2231 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2232
2233 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2234 CHECK_VALID_SIZE(inputs.size(), 1);
2235 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2236 CHECK_VALID_SIZE(outputs.size(), 1);
2237
2238 auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
2239
2240 IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
2241 ARMNN_ASSERT(layer != nullptr);
2242
Mike Kelly377fb212023-01-10 15:55:28 +00002243 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Keith Davis0176fd82021-06-01 17:36:32 +01002244 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2245
2246 // Check if output tensor type is Signed32 or Signed64
2247 if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
2248 outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
2249 {
2250 throw ParseException(
2251 fmt::format(
2252 "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
2253 CHECK_LOCATION().AsString()));
2254 }
2255
2256 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2257 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2258
2259 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2260 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2261}
2262
// Parses a TfLite SQUEEZE operator. Squeeze is implemented as a Reshape layer
// whose target shape is the input shape with the selected size-1 dimensions
// removed (see OutputShapeOfSqueeze).
void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
    auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    std::vector<uint32_t> squeezeDim;
    // A single negative dim index is interpreted as a negative index in python:
    // the effective index is the input rank plus the negative value.
    if (options->squeeze_dims.size() == 1 && options->squeeze_dims[0] < 0)
    {
        int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
        squeezeDim.push_back(static_cast<uint32_t>(dim));
    }
    else
    {
        squeezeDim = AsUnsignedVector(options->squeeze_dims);
    }

    armnn::TensorInfo outputTensorInfo = TfLiteParserImpl::OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);

    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    // Cache the resolved output info so later operators that consume this tensor
    // see the squeezed shape rather than the model's original one.
    auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
    m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register slot indices now; the actual connections are wired once every layer exists.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2312
// Parses a TfLite STRIDED_SLICE operator.
// Inputs: [data, begin, end, strides]; the last three are read directly from
// the model's constant buffers — presumably they are constant tensors in any
// valid model (no null check is performed here; TODO confirm upstream
// validation guarantees this).
void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsStridedSliceOptions();

    // Copy the TfLite bit-mask attributes straight into the ArmNN descriptor.
    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Input 1: per-dimension start indices.
    armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    // Input 2: per-dimension end indices (exclusive).
    armnn::TensorInfo endTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    // Input 3: per-dimension strides.
    armnn::TensorInfo strideTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Output shape is inferred from the data input at parse time.
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register only the data input; the constant inputs are baked into the descriptor.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2369
Kevin May7d96b162021-02-03 17:38:41 +00002370void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002371{
2372 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2373
Mike Kelly0d77ae12022-01-07 17:42:27 +00002374 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2375 const auto* options = operatorPtr->builtin_options.AsSubOptions();
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002376
2377 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2378 CHECK_VALID_SIZE(inputs.size(), 2);
2379
2380 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2381 CHECK_VALID_SIZE(outputs.size(), 1);
2382
Mike Kelly377fb212023-01-10 15:55:28 +00002383 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2384 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002385
James Ward58dec6b2020-09-11 17:32:44 +01002386 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002387 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Sub, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002388 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002389
Mike Kelly377fb212023-01-10 15:55:28 +00002390 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002391 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2392
2393 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002394 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002395
2396 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2397
2398 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2399 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2400}
2401
Kevin May7d96b162021-02-03 17:38:41 +00002402void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302403{
2404 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2405
Mike Kelly0d77ae12022-01-07 17:42:27 +00002406 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2407 const auto* options = operatorPtr->builtin_options.AsDivOptions();
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302408
2409 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2410 CHECK_VALID_SIZE(inputs.size(), 2);
2411
2412 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2413 CHECK_VALID_SIZE(outputs.size(), 1);
2414
Mike Kelly377fb212023-01-10 15:55:28 +00002415 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2416 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302417
James Ward58dec6b2020-09-11 17:32:44 +01002418 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002419 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002420 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302421
Mike Kelly377fb212023-01-10 15:55:28 +00002422 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302423 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2424
2425 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002426 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302427 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2428
2429 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2430 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2431}
2432
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002433void TfLiteParserImpl::ParseFloorDiv(size_t subgraphIndex, size_t operatorIndex)
2434{
2435 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2436
2437 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2438 CHECK_VALID_SIZE(inputs.size(), 2);
2439
2440 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2441 CHECK_VALID_SIZE(outputs.size(), 1);
2442
Mike Kelly377fb212023-01-10 15:55:28 +00002443 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2444 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002445
2446 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002447 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002448 ARMNN_ASSERT(layer != nullptr);
2449
Mike Kelly377fb212023-01-10 15:55:28 +00002450 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002451 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2452
2453 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2454 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2455 layer = AddFusedFloorLayer(layer, 0);
2456
2457 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2458 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2459}
2460
Kevin May7d96b162021-02-03 17:38:41 +00002461void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002462{
2463 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2464
Mike Kelly0d77ae12022-01-07 17:42:27 +00002465 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2466 const auto* options = operatorPtr->builtin_options.AsAddOptions();
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002467
2468 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2469 CHECK_VALID_SIZE(inputs.size(), 2);
2470
2471 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2472 CHECK_VALID_SIZE(outputs.size(), 1);
2473
Mike Kelly377fb212023-01-10 15:55:28 +00002474 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2475 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002476
James Ward58dec6b2020-09-11 17:32:44 +01002477 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002478 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Add, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002479 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002480
Mike Kelly377fb212023-01-10 15:55:28 +00002481 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002482 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2483
2484 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002485 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002486 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2487
2488 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2489 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2490}
2491
Kevin May7d96b162021-02-03 17:38:41 +00002492void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002493{
2494 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2495
Mike Kelly0d77ae12022-01-07 17:42:27 +00002496 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2497 const auto* options = operatorPtr->builtin_options.AsMulOptions();
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002498
2499 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2500 CHECK_VALID_SIZE(inputs.size(), 2);
2501
2502 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2503 CHECK_VALID_SIZE(outputs.size(), 1);
2504
Mike Kelly377fb212023-01-10 15:55:28 +00002505 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2506 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002507
James Ward58dec6b2020-09-11 17:32:44 +01002508 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly3ec30772023-03-08 13:47:17 +00002509 IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Mul, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002510 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002511
Mike Kelly377fb212023-01-10 15:55:28 +00002512 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002513 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2514
2515 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002516 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002517 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2518
2519 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2520 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2521}
2522
/// Translates a TFLite MEAN operator into an ArmNN Mean layer.
/// Input 0 is the data tensor; input 1 holds the reduction axes. If the axis
/// buffer is absent, the reduction is taken over every dimension.
void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    TensorInfo dimTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    armnn::MeanDescriptor desc;
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    // Get const axis value from model and set it to descriptor.
    if (axisBufferPtr != nullptr)
    {
        std::vector<int32_t> axisData(dimTensorInfo.GetNumElements());
        ::memcpy(axisData.data(), axisBufferPtr->data.data(), dimTensorInfo.GetNumBytes());

        // Convert the axis to unsigned int and remove duplicates.
        // (i + rank) % rank maps negative axis values (e.g. -1 for the last
        // dimension) into the range [0, rank); the std::set de-duplicates and
        // sorts them.
        auto rank = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
        std::set<unsigned int> uniqueAxis;
        std::transform(axisData.begin(),
                       axisData.end(),
                       std::inserter(uniqueAxis, uniqueAxis.begin()),
                       [rank](int i)->unsigned int{
                           return static_cast<uint32_t>(((i + rank) % rank)); });
        desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    }
    else
    {
        // No constant axis tensor: reduce across every input dimension.
        for (uint32_t i = 0; i < inputTensorInfo.GetNumDimensions(); ++i)
        {
            desc.m_Axis.push_back(i);
        }
    }

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);

    // keep_dims is not read from the operator options; it is inferred from
    // whether the model's declared output rank matches the input rank.
    desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ? true : false;

    auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Re-derive the output tensor info from the connected input (overwrites the
    // value taken from the model above).
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2578
/// Translates a TFLite PAD or PADV2 operator into an ArmNN Pad layer.
/// PAD has two inputs (data, paddings); PADV2 has a third scalar input holding
/// the explicit pad value. For quantized inputs without an explicit pad value,
/// the quantization zero-point is used so the padding is a quantized zero.
void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    std::vector<unsigned int> padBuffer = GetUIntBuffer(padTensorInfo, m_Model, inputs[1]->buffer);

    // The paddings tensor holds (before, after) pairs, one pair per dimension.
    size_t step = 2;
    armnn::PadDescriptor desc;
    auto opcode = GetOpCode(m_Model, subgraphIndex, operatorIndex);

    if (opcode == tflite::BuiltinOperator_PAD)
    {
        CHECK_VALID_SIZE(inputs.size(), 2);

        // For quantized inputs pad with the zero-point, i.e. a quantized 0.
        if (inputTensorInfo.IsQuantized())
        {
            desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
        }
    }
    else if (opcode == tflite::BuiltinOperator_PADV2)
    {
        CHECK_VALID_SIZE(inputs.size(), 3);

        armnn::TensorInfo padValueTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        if (padValueTensorInfo.GetNumElements() != 1)
        {
            ARMNN_THROW_PARSE_EXCEPTION("Multiple padding values are not supported in PADV2");
        }
        BufferRawPtr padValueBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

        // Get the pad value from the input tensor
        if (padValueBufferPtr->data.size() > 0)
        {
            // m_PadValue is a float, so quantized pad values are dequantized
            // with the pad-value tensor's own scale/offset.
            switch (padValueTensorInfo.GetDataType())
            {
                case armnn::DataType::Float32:
                {
                    std::vector<float> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = padValueBuffer[0];
                    break;
                }
                case armnn::DataType::QAsymmU8:
                {
                    std::vector<uint8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = armnn::Dequantize<uint8_t>(padValueBuffer[0],
                                                                 padValueTensorInfo.GetQuantizationScale(),
                                                                 padValueTensorInfo.GetQuantizationOffset());
                    break;
                }
                case armnn::DataType::QAsymmS8:
                case armnn::DataType::QSymmS8:
                {
                    std::vector<int8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = armnn::Dequantize<int8_t>(padValueBuffer[0],
                                                                padValueTensorInfo.GetQuantizationScale(),
                                                                padValueTensorInfo.GetQuantizationOffset());
                    break;
                }
                default: ARMNN_THROW_PARSE_EXCEPTION("Unsupported DataType");
            }
        }
        else if (inputTensorInfo.IsQuantized())
        {
            // Empty pad-value buffer: fall back to the zero-point, as for PAD.
            desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
        }
    }

    // Unpack the flat (before, after) buffer into the descriptor's pad list.
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    auto layerName = (opcode == tflite::BuiltinOperator_PAD) ? fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex)
            : fmt::format("PadV2:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor (input 0) is wired as a layer input; the paddings
    // and pad-value tensors were consumed as constants above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2677
/// Translates a TFLite MIRROR_PAD operator into an ArmNN Pad layer with
/// Reflect or Symmetric padding mode, validating the pad sizes against the
/// input shape.
void TfLiteParserImpl::ParseMirrorPad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    // Raw copy of the paddings tensor; assumes its element size matches
    // unsigned int (32-bit) — the byte count is taken from the tensor info.
    std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
    ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());

    // The paddings tensor holds (before, after) pairs, one pair per dimension.
    size_t step = 2;
    armnn::PadDescriptor desc;
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsMirrorPadOptions();

    if (options->mode == tflite::MirrorPadMode_REFLECT)
    {
        desc.m_PaddingMode = PaddingMode::Reflect;
    }
    else if (options->mode == tflite::MirrorPadMode_SYMMETRIC)
    {
        desc.m_PaddingMode = PaddingMode::Symmetric;
    }
    else
    {
        ARMNN_THROW_PARSE_EXCEPTION("PaddingMode must be either REFLECT or SYMMETRIC");
    }

    // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
    // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
    auto inputShape = inputTensorInfo.GetShape();
    auto padList = desc.m_PadList;

    // isReflect is 1 for Reflect mode, 0 for Symmetric, tightening the bound
    // by one in the Reflect case.
    const unsigned int isReflect = static_cast<unsigned int>(desc.m_PaddingMode == PaddingMode::Reflect);
    for(unsigned int i = 0; i < padList.size(); ++i)
    {
        if(padList.at(i).first > (inputShape[i] - isReflect) ||
           padList.at(i).second > (inputShape[i] - isReflect))
        {
            ARMNN_THROW_PARSE_EXCEPTION("Padding values must be less (Reflect) or "
                                        "equal (Symmetric) to the dimension size.");
        }
    }

    auto layerName = fmt::format("MirrorPad:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor (input 0) is wired as a layer input; the paddings
    // tensor was consumed as a constant above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2748
/// Translates a TFLite PRELU operator into an ArmNN Prelu layer.
/// Input 0 is the data tensor, input 1 the alpha (slope) tensor. A constant
/// alpha is materialised as a dedicated Constant layer wired into input
/// slot 1; a dynamic alpha is registered as a regular layer input.
void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo alphaTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);


    if (IsConstTensor(inputs[1]))
    {
        // Constant alpha: wire input 0 directly, then feed slot 1 from a new
        // Constant layer holding the alpha data (converted to input 0's type).
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
        RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);

        auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo,
                                                               inputTensorInfo.GetDataType());
        std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
        IConnectableLayer* constLayer =
                    m_Network->AddConstantLayer(alphaTensorAndData.first, constLayerName.c_str());
        ARMNN_ASSERT(constLayer != nullptr);

        constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
        constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
        // Register the constant's output under the virtual operator id so any
        // other consumer of the alpha tensor connects to this Constant layer.
        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            constLayer,
                            { inputTensorIndexes[1] });
    }
    else
    {
        // Dynamic alpha: both tensors are ordinary layer inputs.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
    }

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2801
Kevin May7d96b162021-02-03 17:38:41 +00002802void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00002803{
2804 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2805
2806 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2807 CHECK_VALID_SIZE(inputs.size(), 1);
2808
2809 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2810 CHECK_VALID_SIZE(outputs.size(), 1);
2811
James Ward58dec6b2020-09-11 17:32:44 +01002812 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002813
2814 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002815 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002816
Mike Kelly377fb212023-01-10 15:55:28 +00002817 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan66dedc72019-12-10 16:32:07 +00002818 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2819
2820 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2821 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2822
2823 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2824 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2825}
Finn Williamsc42c3842019-01-22 14:18:11 +00002826
/// RELU is delegated to the shared activation parser.
void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
}
2831
/// RELU6 is delegated to the shared activation parser as a BoundedReLu
/// (the parser sets the 0..6 bounds).
void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
}
Sadik Armagan58f39192018-09-17 14:14:39 +01002836
/// LEAKY_RELU is delegated to the shared activation parser (which reads the
/// alpha slope from the operator options).
void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
}
2841
/// LOGISTIC (sigmoid) is delegated to the shared activation parser.
void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
}
2846
/// TANH is delegated to the shared activation parser.
void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
}
2851
/// ELU is delegated to the shared activation parser.
void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
}
2856
/// HARD_SWISH is delegated to the shared activation parser.
void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
}
Finn Williamsc42c3842019-01-22 14:18:11 +00002861
Kevin May7d96b162021-02-03 17:38:41 +00002862void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
Finn Williamsc42c3842019-01-22 14:18:11 +00002863{
2864 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00002865 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Jan Eilers8eb25602020-03-09 12:13:48 +00002866 IgnoreUnused(operatorPtr);
Sadik Armagan58f39192018-09-17 14:14:39 +01002867
2868 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2869 CHECK_VALID_SIZE(inputs.size(), 1);
2870
2871 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2872 CHECK_VALID_SIZE(outputs.size(), 1);
2873
James Ward58dec6b2020-09-11 17:32:44 +01002874 auto layerName = fmt::format("Activation:");
Sadik Armagan58f39192018-09-17 14:14:39 +01002875 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00002876 activationDesc.m_Function = activationType;
2877
2878 switch (activationType)
2879 {
2880 case ActivationFunction::ReLu:
2881 {
James Ward58dec6b2020-09-11 17:32:44 +01002882 layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00002883 break;
2884 }
2885 case ActivationFunction::BoundedReLu:
2886 {
James Ward58dec6b2020-09-11 17:32:44 +01002887 layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00002888 activationDesc.m_A = 6.0f;
2889 activationDesc.m_B = 0.0f;
2890 break;
2891 }
2892 case ActivationFunction::Sigmoid:
2893 {
James Ward58dec6b2020-09-11 17:32:44 +01002894 layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00002895 break;
2896 }
Nina Drozd99851762019-04-09 09:37:38 +01002897 case ActivationFunction::TanH:
2898 {
James Ward58dec6b2020-09-11 17:32:44 +01002899 layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd99851762019-04-09 09:37:38 +01002900 activationDesc.m_A = 1.0f;
2901 activationDesc.m_B = 1.0f;
2902 break;
2903 }
Sadik Armagan12239e72020-05-27 11:06:17 +01002904 case ActivationFunction::LeakyReLu:
2905 {
James Ward58dec6b2020-09-11 17:32:44 +01002906 layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00002907 const auto* options = operatorPtr->builtin_options.AsLeakyReluOptions();
Sadik Armagan12239e72020-05-27 11:06:17 +01002908 activationDesc.m_A = options->alpha;
2909 break;
2910 }
Matthew Sloyan7515d072020-12-16 12:50:01 +00002911 case ActivationFunction::Elu:
2912 {
2913 layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
2914 activationDesc.m_A = 1.0f;
2915 break;
2916 }
Jan Eilers2f746b32020-07-28 14:00:06 +01002917 case ActivationFunction::HardSwish:
Matthew Sloyan7515d072020-12-16 12:50:01 +00002918 {
James Ward58dec6b2020-09-11 17:32:44 +01002919 layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
Jan Eilers2f746b32020-07-28 14:00:06 +01002920 break;
Matthew Sloyan7515d072020-12-16 12:50:01 +00002921 }
Finn Williamsc42c3842019-01-22 14:18:11 +00002922 default:
2923 {
2924 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002925 fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
2926 static_cast<int>(activationType), CHECK_LOCATION().AsString()));
Finn Williamsc42c3842019-01-22 14:18:11 +00002927 }
2928 }
2929
2930 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01002931
Mike Kelly377fb212023-01-10 15:55:28 +00002932 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan58f39192018-09-17 14:14:39 +01002933 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2934
2935 // register the input connection slots for the layer, connections are made after all layers have been created
2936 // only the tensors for the inputs are relevant, exclude the const tensors
2937 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2938 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2939
2940 // register the output connection slots for the layer, connections are made after all layers have been created
2941 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2942 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2943}
Mike Kelly0d77ae12022-01-07 17:42:27 +00002944armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo& inputTensorInfo,
2945 const std::vector<int32_t>& targetDimsIn)
Sadikb94967b2018-09-19 15:30:00 +01002946{
2947 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2948 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2949
2950 if (stretchDim != targetDimsIn.end())
2951 {
2952 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2953 {
2954 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002955 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01002956 }
2957
2958 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002959 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01002960 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2961
2962 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2963 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2964 }
2965
2966 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2967
2968 TensorInfo reshapeInfo = inputTensorInfo;
2969 reshapeInfo.SetShape(outputShape);
2970
2971 return reshapeInfo;
2972}
2973
// Parses a TFLite RESHAPE operator and adds the corresponding ReshapeLayer to
// the network. The target shape may come from the operator's built-in options
// or from a second (constant) input tensor; if the second input's buffer is
// empty, a limited runtime-inference fallback is attempted.
void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsReshapeOptions();
    auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo actualOutputTensorInfo  = ToTensorInfo(outputs[0]);
    // Reshape must not change quantization parameters between input and output.
    CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");

    // Extracting new shape for the output
    // There are two ways it can be passed
    //  * First is to define the target shape in the operator built-in options
    //  * Second is to pass it as a second input tensor
    std::vector<int32_t> targetShape;
    bool targetShapeFound = false;
    // Check if built-in options were given
    if (options != nullptr)
    {
        // make sure the parameter is given
        if (options->new_shape.empty() == false)
        {
            targetShape = options->new_shape;
            targetShapeFound = true;
        }
    }

    // If there is no built-in option given or if the built-in new_shape parameter was empty
    if (!targetShapeFound)
    {
        // Check for a second input tensor
        if (inputs.size() > 1 && inputs[1] != nullptr)
        {
            if (inputs[1]->is_variable)
            {
                ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
            }

            if (inputs[1]->shape.size() != 1)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
            }

            if (inputs[1]->type != tflite::TensorType_INT32)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
            }

            // Extract target shape from input
            auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
            auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
            if (values)
            {
                // Constant shape input: read the target dimensions directly from the buffer.
                for (int i = 0; i < inputs[1]->shape[0]; ++i)
                {
                    targetShape.push_back(values[i]);
                }
            }
            else
            {
                // The shape input buffer is empty (non-constant shape); try to infer
                // the target shape from the declared output info instead.
                try
                {
                    // We attempt to infer during Runtime.
                    TensorShape reshapeShapes = ToTensorInfo(inputs[1]).GetShape();

                    // If the shape input has as many entries as the output has dimensions,
                    // assume the declared output shape is the target.
                    if (reshapeShapes[0] == actualOutputTensorInfo.GetNumDimensions())
                    {
                        for (unsigned int i = 0; i < actualOutputTensorInfo.GetShape().GetNumDimensions(); ++i)
                        {
                            targetShape.push_back(actualOutputTensorInfo.GetShape()[i]);
                        }
                    }
                    // The parser only supports shape (batch, -1) or (-1) for non-constant shape input.
                    else if (reshapeShapes[0] > 2)
                    {
                        throw ParseException(fmt::format("Invalid input shape '{}' in Reshape layer '{}' {}. "
                                                         "When inferring during runtime, the parser only supports "
                                                         "shape (batch, -1) or (-1) for target shape input.",
                                                         reshapeShapes[0],
                                                         layerName,
                                                         CHECK_LOCATION().AsString()));
                    }
                    else
                    {
                        // One entry => flatten to (-1); two entries => (batch, -1),
                        // where the second dimension is derived from the element count.
                        const int32_t numInputElements = inputTensorInfo.GetNumElements();
                        const int32_t inputTensorShape = inputTensorInfo.GetShape()[0];
                        if (reshapeShapes[0] == 1)
                        {
                            targetShape = {numInputElements};
                        }
                        else if (reshapeShapes[0] == 2)
                        {
                            targetShape = {inputTensorShape, numInputElements / inputTensorShape};
                        }
                    }
                }
                catch (const std::exception& exc)
                {
                    ARMNN_THROW_PARSE_EXCEPTION("Failed attempt to infer during runtime the target shape input for "
                                                "Reshape operation. Reshape operator target shape input buffer data "
                                                "is null. " << exc.what());
                }
            }
        }
        else
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }
    }

    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    // The output shape can be provided to us in 2 ways:
    // 1. through the normal 'shape' parameter given by outputs[indx]->shape
    // 2. through additional parameter 'shape_signature' given by outputs[indx]->buffer.
    // This parameter can sometimes contain -1 value not visible in the 'shape' parameter.
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        // Attempt to extract output shape from secondary 'shape_signature'
        // parameter and try to CheckShape() with this param.
        std::vector<int32_t> secondaryOutputTargetShape = outputs[0]->shape_signature;

        // if outputs[0]->shape_signature contain a -1 value, we need to compute its actual value
        // from reshape input in order to correctly verify reshape parameters equal output shape
        armnn::TensorInfo secondaryReshapeOutputTensorInfo =
            TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, secondaryOutputTargetShape);

        if (!CheckShape(reshapeOutputTensorShape, secondaryReshapeOutputTensorInfo.GetShape()))
        {
            std::stringstream ss;
            ss << "New shape defined in reshape parameters "
               << reshapeOutputTensorShape
               << " does not equal output shape "
               << actualOutputTensorInfo.GetShape()
               << ": "
               << CHECK_LOCATION().AsString();
            throw ParseException(ss.str());
        }
    }
    auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
    // Cache the resolved output info so later operators see the concrete shape.
    m_TensorInfos[outputTensorIds[0]] = reshapeOutputTensorInfo;

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3140
// Parses a TFLite RESIZE_BILINEAR operator by delegating to the shared
// ParseResize() handler with ResizeMethod::Bilinear.
void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
}
3145
// Parses a TFLite RESIZE_NEAREST_NEIGHBOR operator by delegating to the shared
// ParseResize() handler with ResizeMethod::NearestNeighbor.
void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
}
3150
Kevin May7d96b162021-02-03 17:38:41 +00003151void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
Sadik Armagana3b31f02019-12-05 09:08:53 +00003152{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003153 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3154
3155 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3156 CHECK_VALID_SIZE(inputs.size(), 2);
3157
3158 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3159 CHECK_VALID_SIZE(outputs.size(), 1);
3160
Mike Kelly377fb212023-01-10 15:55:28 +00003161 armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003162
3163 // Data for the parsed tensor args (size) must be stored locally.
3164 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
3165
3166 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3167 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
3168
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01003169 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00003170 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003171 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01003172 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
3173 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003174
James Ward58dec6b2020-09-11 17:32:44 +01003175 auto layerName = fmt::format("Resize:");
Sadik Armagana3b31f02019-12-05 09:08:53 +00003176
3177 switch (resizeMethod)
3178 {
3179 case ResizeMethod::Bilinear:
3180 {
James Ward58dec6b2020-09-11 17:32:44 +01003181 layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00003182
3183 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3184 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
3185
David Monahan4a0c9b92020-05-30 09:48:39 +01003186 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00003187 break;
3188 }
3189 case ResizeMethod::NearestNeighbor:
3190 {
James Ward58dec6b2020-09-11 17:32:44 +01003191 layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagana3b31f02019-12-05 09:08:53 +00003192 break;
3193 }
3194 default:
3195 {
3196 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003197 fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
3198 static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
Sadik Armagana3b31f02019-12-05 09:08:53 +00003199 }
3200 }
3201
Mike Kelly377fb212023-01-10 15:55:28 +00003202 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
James Conroy05102392020-06-24 15:39:55 +01003203
3204 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
3205 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00003206 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
3207 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003208 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3209
3210 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3211 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3212
3213 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3214 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3215}
3216
Kevin May7d96b162021-02-03 17:38:41 +00003217void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan479045b2018-10-01 11:51:37 +01003218{
3219 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3220
Mike Kelly0d77ae12022-01-07 17:42:27 +00003221 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3222 const auto* options = operatorPtr->builtin_options.AsConcatenationOptions();
Sadik Armagan479045b2018-10-01 11:51:37 +01003223
3224 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
3225
3226 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3227 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Mike Kelly377fb212023-01-10 15:55:28 +00003228 auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
3229
Sadik Armagan479045b2018-10-01 11:51:37 +01003230 CHECK_VALID_SIZE(outputs.size(), 1);
3231
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003232 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
Mike Kelly377fb212023-01-10 15:55:28 +00003233 uint32_t inputRank = InputTensorInfo(subgraphIndex, operatorIndex, 0).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01003234
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003235 const unsigned int concatDimInput = static_cast<unsigned int>(
Mike Kelly377fb212023-01-10 15:55:28 +00003236 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01003237
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003238 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
3239 concatDescriptor.SetConcatAxis(concatDimInput);
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003240 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01003241
3242 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
3243 {
Mike Kelly377fb212023-01-10 15:55:28 +00003244 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, viewIndex);
Sadik Armagan479045b2018-10-01 11:51:37 +01003245
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003246 // This set up concatDescriptor view origin
3247 armnnUtils::ProcessConcatInputTensorInfo(
Mike Kelly377fb212023-01-10 15:55:28 +00003248 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01003249 }
3250
James Ward58dec6b2020-09-11 17:32:44 +01003251 auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01003252
Jim Flynn906f9462019-05-10 13:55:21 +01003253 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003254 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00003255 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003256 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01003257
James Conroy05102392020-06-24 15:39:55 +01003258 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003259 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01003260
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003261 // add fused activation layer
3262 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01003263
Sadik Armagan479045b2018-10-01 11:51:37 +01003264 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3265 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3266}
3267
// Parses a TFLite FULLY_CONNECTED operator. Weights (input 1) and optional
// bias (input 2) are passed to the layer as inputs; constant tensors are
// materialised later by SetupConstantLayers. Inputs with rank > 2 are
// flattened to 2D [batch_size, input_size] via an inserted reshape, and a
// rank > 2 output is restored via a trailing reshape ("ExpandDims") layer.
void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = false;
    // TFLite stores FC weights as [output_size, input_size], so they must be transposed.
    desc.m_TransposeWeightMatrix = true;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Fully Connected Layer accepts two dimensional weights input
    int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
    if (weightsDimension != 2)
    {
        throw ParseException(
            fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
                        "Node {}",
                        weightsDimension,
                        CHECK_LOCATION().AsString()));
    }

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input tensor to the registration list
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    desc.m_ConstantWeights = IsConstTensor(inputs[1]);

    // Add the weights input to the registration list, constant layers will be added by SetupConstantLayers if constant.
    tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);

    // Record weights that need dequantizing (e.g. int8 weights with float input).
    if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
    {
        m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
    }

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        // Add the biases input to the registration list, constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);

        // As with the weights, record a bias that needs dequantizing.
        if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
        {
            m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
        }
    }

    // Filters and biases are always passed to fully connected as inputs
    layer = m_Network->AddFullyConnectedLayer(desc, layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    unsigned int startingSlotIndex = 0;
    if (inputTensorInfo.GetNumDimensions() > 2)
    {
        // Add reshape to flatten to 2D [batch_size, input_size],
        // where "input_size" corresponds to the number of inputs to the layer,
        // matching the second dimension of weights,
        // and "batch_size" is calculated by dividing the number of elements by "input_size".
        std::vector<unsigned int> reshapedDimensions(2);
        reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
        reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];

        if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
        {
            throw ParseException(
                fmt::format("Failed to deduce input tensor shape from filter size {} {}",
                            reshapedDimensions[1],
                            CHECK_LOCATION().AsString()));
        }

        armnn::TensorInfo reshapedTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
        reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
        inputTensorInfo = reshapedTensorInfo;

        std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor,
                                                                            reshapeLayerName.c_str());

        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
        reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

        RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
        // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
        tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
        startingSlotIndex = 1;
    }

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);

    // Infer the FC output shape from the (possibly flattened) input and filter shapes.
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromShapes(subgraphIndex, operatorIndex, layer, 0,
                                                                    { inputTensorInfo.GetShape(),
                                                                      filterTensorInfo.GetShape() });

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    if (outputTensorInfo.GetNumDimensions() > 2)
    {
        // Calculate reshape to flatten to 2D [batch_size, input_size]
        std::vector<unsigned int> reshapedDimensions(2);
        reshapedDimensions[1] = filterTensorInfo.GetShape()[0];
        reshapedDimensions[0] = outputTensorInfo.GetNumElements() / reshapedDimensions[1];
        armnn::TensorInfo reshapedOutputTensorInfo = outputTensorInfo;
        if (outputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
        {
            throw ParseException(
                fmt::format("Failed to deduce output tensor shape from filter size {} {}",
                            reshapedDimensions[1],
                            CHECK_LOCATION().AsString()));
        }
        reshapedOutputTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
        layer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);

        // Restore the original rank on the output via a trailing reshape layer.
        std::string reshapeLayerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
        layer = AddReshapeLayer(layer, 0, reshapeLayerName, outputTensorInfo);
    }

    // we need to add the activation layer and fortunately we don't need to care about the data layout
    armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
                                                                             options->fused_activation_function);

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});

    // Cache the final (possibly un-flattened) output info for downstream operators.
    m_TensorInfos[outputTensorIndexes[0]] = layer->GetOutputSlot(0).GetTensorInfo();
}
3412
Kevin May7d96b162021-02-03 17:38:41 +00003413void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
keidav011b3e2ea2019-02-21 10:07:37 +00003414{
3415 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3416
Mike Kelly0d77ae12022-01-07 17:42:27 +00003417 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
keidav011b3e2ea2019-02-21 10:07:37 +00003418
3419 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3420 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3421 CHECK_VALID_SIZE(outputs.size(), 4);
3422
3423 // Obtain custom options from flexbuffers
3424 auto custom_options = operatorPtr->custom_options;
3425 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
3426
3427 // Obtain descriptor information from tf lite
3428 DetectionPostProcessDescriptor desc;
3429 desc.m_MaxDetections = m["max_detections"].AsUInt32();
3430 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
3431 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
3432 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
3433 desc.m_NumClasses = m["num_classes"].AsUInt32();
3434 desc.m_ScaleH = m["h_scale"].AsFloat();
3435 desc.m_ScaleW = m["w_scale"].AsFloat();
3436 desc.m_ScaleX = m["x_scale"].AsFloat();
3437 desc.m_ScaleY = m["y_scale"].AsFloat();
3438
keidav0107d58c72019-02-26 11:57:39 +00003439 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00003440 {
keidav0107d58c72019-02-26 11:57:39 +00003441 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00003442 }
3443 if (!(m["detections_per_class"].IsNull()))
3444 {
3445 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
3446 }
3447
3448 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
3449 {
3450 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
3451 "must be positive and less than or equal to 1.");
3452 }
3453
Mike Kelly377fb212023-01-10 15:55:28 +00003454 armnn::TensorInfo anchorTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003455 auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);
keidav011b3e2ea2019-02-21 10:07:37 +00003456
James Ward58dec6b2020-09-11 17:32:44 +01003457 auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003458 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
keidav011b3e2ea2019-02-21 10:07:37 +00003459 layerName.c_str());
3460
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003461 ARMNN_ASSERT(layer != nullptr);
keidav011b3e2ea2019-02-21 10:07:37 +00003462
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003463 // The model does not specify the output shapes.
3464 // The output shapes are calculated from the max_detection and max_classes_per_detection.
3465 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
Mike Kelly377fb212023-01-10 15:55:28 +00003466 m_OverriddenOutputShapes.push_back({ 1, numDetectedBox, 4 });
3467 m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
3468 m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
3469 m_OverriddenOutputShapes.push_back({ 1 });
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003470
keidav011b3e2ea2019-02-21 10:07:37 +00003471 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
3472 {
Mike Kelly377fb212023-01-10 15:55:28 +00003473 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverriddenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00003474 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
3475 }
3476
3477 // Register the input connection slots for the layer, connections are made after all layers have been created
3478 // only the tensors for the inputs are relevant, exclude the const tensors
3479 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3480 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
3481
3482 // Register the output connection slots for the layer, connections are made after all layers have been created
3483 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3484 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
3485 outputTensorIndexes[1],
3486 outputTensorIndexes[2],
3487 outputTensorIndexes[3]});
3488}
3489
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003490/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00003491void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003492{
3493 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3494
3495 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3496 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3497 CHECK_VALID_SIZE(outputs.size(), 1);
3498
3499 if (inputs.size() < 1)
3500 {
3501 throw ParseException("Pack must have at least one input.");
3502 }
3503
3504 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3505 const auto* options = operatorPtr->builtin_options.AsPackOptions();
3506
3507 StackDescriptor desc;
3508 desc.m_Axis = static_cast<uint32_t>(options->axis);
3509 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
3510
3511 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00003512 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003513 desc.m_InputShape = inputTensorInfo.GetShape();
3514
James Ward58dec6b2020-09-11 17:32:44 +01003515 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003516 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
3517
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003518 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003519
Mike Kelly377fb212023-01-10 15:55:28 +00003520 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003521 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3522
3523 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3524 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
3525
3526 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3527 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3528}
3529
/// Parses a TfLite UNIDIRECTIONAL_SEQUENCE_LSTM operator and adds the equivalent
/// ArmNN UnidirectionalSequenceLstm layer to the network.
///
/// The operator's weights/biases arrive as up to 24 positional operands; many are
/// optional (marked absent with a sentinel index, detected via
/// IsOptionalOperandPresent). Which optional operands are present determines the
/// descriptor flags: CIFG, peephole, projection and layer normalisation.
/// Inputs 18/19 are the output-state and cell-state variable tensors; only the
/// data input and these two state tensors are registered as real connections.
void TfLiteParserImpl::ParseUnidirectionalSequenceLSTM(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    if (inputs.size() < 2)
    {
        throw ParseException("UnidirectionalSequenceLSTM must have at least 2 input.");
    }

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
    const auto nodeParams = operatorPtr->builtin_options.AsUnidirectionalSequenceLSTMOptions();
    CHECK_SUPPORTED_FUSED_ACTIVATION(nodeParams, subgraphIndex, operatorIndex);
    auto inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    auto outputTensorInfo = ToTensorInfo(outputs[0]);

    // Set the params structure for the AddUnidirectionalSequenceLstmLayer call
    // Please refer to each operand at
    // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop
    armnn::LstmInputParams params;

    // Input-to-gate weight tensors; operand 1 (input gate) is optional: its absence
    // (together with operands 5 and 12) enables CIFG mode below.
    if (IsOptionalOperandPresent(operatorPtr->inputs[1]))
    {
        params.m_InputToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[1]].get(),
                                                            inputTensorInfo).first;
    }

    params.m_InputToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[2]].get(),
                                                         inputTensorInfo).first;
    params.m_InputToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[3]].get(),
                                                       inputTensorInfo).first;
    params.m_InputToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[4]].get(),
                                                         inputTensorInfo).first;

    // Recurrent weight tensors of size {n_cell, n_output}
    if (IsOptionalOperandPresent(operatorPtr->inputs[5]))
    {
        params.m_RecurrentToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[5]].get(),
                                                                inputTensorInfo).first;
    }

    params.m_RecurrentToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[6]].get(),
                                                             inputTensorInfo).first;
    params.m_RecurrentToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[7]].get(),
                                                           inputTensorInfo).first;
    params.m_RecurrentToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[8]].get(),
                                                             inputTensorInfo).first;

    // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
    if (IsOptionalOperandPresent(operatorPtr->inputs[9]))
    {
        params.m_CellToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[9]].get(),
                                                           inputTensorInfo).first;
    }

    if (IsOptionalOperandPresent(operatorPtr->inputs[10]))
    {
        params.m_CellToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[10]].get(),
                                                            inputTensorInfo).first;
    }

    if (IsOptionalOperandPresent(operatorPtr->inputs[11]))
    {
        params.m_CellToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[11]].get(),
                                                            inputTensorInfo).first;
    }

    // Gates bias tensors of size {n_cell}
    if (IsOptionalOperandPresent(operatorPtr->inputs[12]))
    {
        params.m_InputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[12]].get(),
                                                      inputTensorInfo).first;
    }

    params.m_ForgetGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[13]].get(),
                                                   inputTensorInfo).first;
    params.m_CellBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[14]].get(),
                                             inputTensorInfo).first;
    params.m_OutputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[15]].get(),
                                                   inputTensorInfo).first;

    // Projection weight tensor of size {n_output, n_cell}
    if (IsOptionalOperandPresent(operatorPtr->inputs[16]))
    {
        params.m_ProjectionWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[16]].get(),
                                                          inputTensorInfo).first;
    }
    // Projection bias tensor of size {n_output}
    if (IsOptionalOperandPresent(operatorPtr->inputs[17]))
    {
        params.m_ProjectionBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[17]].get(),
                                                       inputTensorInfo).first;
    }

    // These state tensors are defined as variable tensors, and will be modified by this op.
    // They are queued in m_ConstantsToBeCreated so constant layers can back them later.
    armnn::TensorInfo outputStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[18]].get());
    m_ConstantsToBeCreated.push_back(operatorPtr->inputs[18]);
    armnn::TensorInfo cellStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[19]].get());
    m_ConstantsToBeCreated.push_back(operatorPtr->inputs[19]);

    // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
    // Operands 20-23 only exist in newer models, hence the inputs.size() guards.
    if (inputs.size() >= 21 && IsOptionalOperandPresent(operatorPtr->inputs[20]))
    {
        params.m_InputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[20]].get(),
                                                              inputTensorInfo).first;
    }

    if (inputs.size() >= 22 && IsOptionalOperandPresent(operatorPtr->inputs[21]))
    {
        params.m_ForgetLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[21]].get(),
                                                               inputTensorInfo).first;
    }

    if (inputs.size() >= 23 && IsOptionalOperandPresent(operatorPtr->inputs[22]))
    {
        params.m_CellLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[22]].get(),
                                                             inputTensorInfo).first;
    }

    if (inputs.size() >= 24 && IsOptionalOperandPresent(operatorPtr->inputs[23]))
    {
        params.m_OutputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[23]].get(),
                                                               inputTensorInfo).first;
    }

    // set the layer descriptor
    // Feature flags are derived from which optional operands were supplied above.
    armnn::UnidirectionalSequenceLstmDescriptor desc;
    desc.m_ActivationFunc = nodeParams->fused_activation_function;
    desc.m_ClippingThresCell = nodeParams->cell_clip;
    desc.m_ClippingThresProj = nodeParams->proj_clip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
                          || params.m_RecurrentToInputWeights == nullptr
                          || params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
    desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
                               || params.m_ForgetLayerNormWeights != nullptr
                               || params.m_CellLayerNormWeights != nullptr
                               || params.m_OutputLayerNormWeights != nullptr);
    desc.m_TimeMajor = nodeParams->time_major;

    // Quantized models carry intermediate tensors whose quantization scales feed the
    // descriptor; otherwise fall back to the default 2^-12 scale.
    if (operatorPtr->intermediates.size() > 3 && desc.m_LayerNormEnabled)
    {
        auto inputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[0]].get(),
                                                      inputTensorInfo).first;
        auto inputIntermediateTensorInfo = inputIntermediate->GetInfo();
        desc.m_InputIntermediateScale = inputIntermediateTensorInfo.GetQuantizationScale();

        auto forgetIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[1]].get(),
                                                       inputTensorInfo).first;
        auto forgetIntermediateTensorInfo = forgetIntermediate->GetInfo();
        desc.m_ForgetIntermediateScale = forgetIntermediateTensorInfo.GetQuantizationScale();

        auto cellIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[2]].get(),
                                                     inputTensorInfo).first;
        auto cellIntermediateTensorInfo = cellIntermediate->GetInfo();
        desc.m_CellIntermediateScale = cellIntermediateTensorInfo.GetQuantizationScale();

        auto outputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[3]].get(),
                                                       inputTensorInfo).first;
        auto outputIntermediateTensorInfo = outputIntermediate->GetInfo();
        desc.m_OutputIntermediateScale = outputIntermediateTensorInfo.GetQuantizationScale();
    }
    else
    {
        float defaultIntermediate = std::pow(2, -12);
        desc.m_InputIntermediateScale = defaultIntermediate;
        desc.m_ForgetIntermediateScale = defaultIntermediate;
        desc.m_CellIntermediateScale = defaultIntermediate;
        desc.m_OutputIntermediateScale = defaultIntermediate;
    }

    // Fifth intermediate (when present) carries the hidden-state quantization parameters.
    if (operatorPtr->intermediates.size() > 4)
    {
        auto hiddentensor = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[4]].get(),
                                                 inputTensorInfo).first;

        desc.m_HiddenStateScale = hiddentensor->GetInfo().GetQuantizationScale();
        desc.m_HiddenStateZeroPoint = hiddentensor->GetInfo().GetQuantizationOffset();
    }
    // NOTE(review): batch size from dim 0 of the input and output size from dim 2 of the
    // output assumes time-major==false layout here — confirm against m_TimeMajor handling.
    unsigned int batchSize = inputTensorInfo.GetShape()[0];
    unsigned int outputSize = outputTensorInfo.GetShape()[2];
    unsigned int numUnits = cellStateInInfo.GetShape()[1];

    armnn::DataType dataType = inputTensorInfo.GetDataType();
    float qScale = inputTensorInfo.GetQuantizationScale();
    float qOffset = inputTensorInfo.GetQuantizationOffset();

    // Scratch buffer holds 3 gate buffers with CIFG (no input gate), 4 without.
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
    if (!desc.m_CifgEnabled)
    {
        scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
    }
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits},
                                             cellStateInInfo.GetDataType(),
                                             cellStateInInfo.GetQuantizationScale(),
                                             cellStateInInfo.GetQuantizationOffset());
    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);

    // Mirror the collected tensors into an LstmInputParamsInfo (TensorInfo view),
    // following the same optional-group structure as the params above.
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());

    if (!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if (desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (desc.m_LayerNormEnabled)
    {
        if(!desc.m_CifgEnabled)
        {
            paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
        }
        paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
        paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
        paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
    }

    auto layerName = fmt::format("UnidirectionalSequenceLSTM:{}:{}", subgraphIndex, operatorIndex);
    armnn::IConnectableLayer* layer = m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
    ARMNN_ASSERT(layer != nullptr);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector({operatorPtr->inputs[0],
                                                operatorPtr->inputs[18],
                                                operatorPtr->inputs[19]});
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0],
                                                             inputTensorIndexes[1],
                                                             inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    // Output slots: 0 = output state, 1 = cell state, 2 = the actual sequence output.
    layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
    layer->GetOutputSlot(1).SetTensorInfo(cellStateOutTensorInfo);
    layer->GetOutputSlot(2).SetTensorInfo(outputTensorInfo);

    // Only the sequence output (slot 2) is wired to the model's output tensor;
    // the state outputs are left unconnected here.
    unsigned int tensorIndex = outputTensorIndexes[0];
    armnn::IOutputSlot* slot = &(layer->GetOutputSlot(2));
    RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
}
3803
Kevin May7d96b162021-02-03 17:38:41 +00003804void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01003805{
3806 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3807
Mike Kelly0d77ae12022-01-07 17:42:27 +00003808 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3809 const auto* options = operatorPtr->builtin_options.AsUnpackOptions();
Nina Drozd200e3802019-04-15 09:47:39 +01003810
3811 // This unpackAxis indicates the axis to unpack
3812 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
3813
3814 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3815 CHECK_VALID_SIZE(inputs.size(), 1);
3816
Mike Kelly377fb212023-01-10 15:55:28 +00003817 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003818
3819 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
3820 {
3821 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003822 fmt::format("The unpack axis: {} cannot be greater than or equal to "
3823 "the number of input dimension {} {}",
3824 unpackAxis,
3825 inputTensorInfo.GetNumDimensions(),
3826 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003827 }
3828
Nina Drozd200e3802019-04-15 09:47:39 +01003829 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
3830 // If num is not defined, automatically infer from the length of the dimension axis.
3831 if(unpackNum == 0)
3832 {
3833 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
3834 }
3835
3836 // If unpack number cannot be inferred and is still zero, throw ParseException.
3837 if(unpackNum == 0)
3838 {
3839 throw ParseException("Number to unpack must greater than zero.");
3840 }
3841
3842 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3843 CHECK_VALID_SIZE(outputs.size(), unpackNum);
3844
3845 auto inputDimSize = inputTensorInfo.GetNumDimensions();
3846 std::vector<unsigned int> unpackDimSizes(inputDimSize);
3847
3848 // Add current input shape to unpackDimSizes
3849 for (unsigned int i = 0; i < inputDimSize; ++i)
3850 {
3851 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
3852 }
3853
3854 if (unpackDimSizes[unpackAxis] != unpackNum)
3855 {
3856 throw ParseException("Number to unpack must be the same as length of the dimension to "
3857 "unpack along.");
3858 }
3859
3860 unpackDimSizes[unpackAxis] /= unpackNum;
3861
3862 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
3863 for (unsigned int j = 0; j < unpackNum; ++j)
3864 {
3865 // Set the size of the views.
3866 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
3867 {
3868 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
3869 }
3870 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
3871 }
3872
James Ward58dec6b2020-09-11 17:32:44 +01003873 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01003874 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003875 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01003876
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003877 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
3878 unpackDimSizes.data());
3879
Nina Drozd200e3802019-04-15 09:47:39 +01003880 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3881 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3882
Finn Williamsb49ed182021-06-29 15:50:08 +01003883 std::vector<unsigned int> reshapeDims;
3884 for (unsigned int axis = 0; axis < splitOutShape.GetNumDimensions(); ++axis)
3885 {
3886 if (axis != unpackAxis)
3887 {
3888 reshapeDims.push_back(splitOutShape[axis]);
3889 }
3890 }
3891
3892 TensorShape reshapeOutputShape(splitOutShape.GetNumDimensions() -1, reshapeDims.data());
3893
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003894 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
3895 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3896 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003897 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01003898 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003899 armnn::ReshapeDescriptor desc;
Finn Williamsb49ed182021-06-29 15:50:08 +01003900 desc.m_TargetShape = reshapeOutputShape;
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003901 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
3902
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01003903 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
3904 outputTensorInfo.GetDataType(),
3905 outputTensorInfo.GetQuantizationScale(),
3906 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003907 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
3908
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01003909 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003910
3911 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
3912 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
3913 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
3914 }
Nina Drozd200e3802019-04-15 09:47:39 +01003915}
3916
Kevin May7d96b162021-02-03 17:38:41 +00003917void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd0324f482019-04-08 10:52:10 +01003918{
3919 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3920
Mike Kelly0d77ae12022-01-07 17:42:27 +00003921 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3922 const auto* options = operatorPtr->builtin_options.AsSplitOptions();
Nina Drozd0324f482019-04-08 10:52:10 +01003923
3924 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
3925
Nina Drozd200e3802019-04-15 09:47:39 +01003926 // If number of splits cannot be inferred and is zero, throw ParseException.
3927 if(numSplits == 0)
3928 {
3929 throw ParseException("Number to splits must greater than zero.");
3930 }
3931
Nina Drozd0324f482019-04-08 10:52:10 +01003932 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3933 CHECK_VALID_SIZE(inputs.size(), 2);
3934 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3935 CHECK_VALID_SIZE(outputs.size(), numSplits);
3936
Mike Kelly377fb212023-01-10 15:55:28 +00003937 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
3938 armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003939 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
Nina Drozd0324f482019-04-08 10:52:10 +01003940
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003941 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003942 if (axisBufferPtr == nullptr)
3943 {
3944 throw ParseException(
3945 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
3946 CHECK_LOCATION().AsString()));
3947 }
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003948
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003949 std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
3950 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
3951 int32_t axis = axisData[0];
3952
3953 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
3954 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
3955 {
3956 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
3957 // E.g. Rank 4 tensor can have axis in range [-4, 3)
3958 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
3959 throw ParseException(
3960 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
3961 axis,
3962 CHECK_LOCATION().AsString()));
3963 }
3964
3965 const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
Nina Drozd0324f482019-04-08 10:52:10 +01003966
Nina Drozd0324f482019-04-08 10:52:10 +01003967 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003968 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01003969 {
3970 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003971 fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
3972 inputTensorInfo.GetNumDimensions(),
3973 MaxNumOfTensorDimensions,
3974 CHECK_LOCATION().AsString()));
Nina Drozd0324f482019-04-08 10:52:10 +01003975 }
3976
3977 std::vector<unsigned int> splitterDimSizes(inputDimSize);
3978
3979 // Add current input shape to splitterDimSizes
3980 for (unsigned int i = 0; i < inputDimSize; ++i)
3981 {
3982 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
3983 }
3984
3985 if (splitterDimSizes[splitDim] % numSplits != 0)
3986 {
3987 throw ParseException("Number of splits must evenly divide the dimension");
3988 }
3989 splitterDimSizes[splitDim] /= numSplits;
3990
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003991 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01003992 for (unsigned int j = 0; j < numSplits; ++j)
3993 {
3994 // Set the size of the views.
3995 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
3996 {
3997 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
3998 }
3999 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
4000 }
4001
James Ward58dec6b2020-09-11 17:32:44 +01004002 auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd0324f482019-04-08 10:52:10 +01004003 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01004004 ARMNN_ASSERT(layer != nullptr);
Nina Drozd0324f482019-04-08 10:52:10 +01004005
4006 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01004007 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01004008
Nina Drozd0324f482019-04-08 10:52:10 +01004009 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4010 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01004011 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01004012 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01004013 }
4014
4015 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4016 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4017}
4018
Derek Lambertif0176992020-04-28 13:37:49 +01004019unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
4020{
4021 int numDims = armnn::numeric_cast<int>(numDimsIn);
4022 int v = idx < 0 ? numDims + idx : idx;
4023 ARMNN_ASSERT(v >= 0);
4024 ARMNN_ASSERT(v < numDims);
4025
4026 return static_cast<unsigned int>(v);
4027}
4028
Kevin May7d96b162021-02-03 17:38:41 +00004029void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01004030{
4031 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4032
Mike Kelly0d77ae12022-01-07 17:42:27 +00004033 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4034 const auto* options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01004035
4036 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4037 CHECK_VALID_SIZE(inputs.size(), 3);
4038
4039 auto& inputTensor = inputs[0];
4040 auto& splitsTensor = inputs[1];
4041 auto& axisTensor = inputs[2];
4042
4043 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
4044 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
4045 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
4046 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
4047
4048 // Inputs
4049 auto inputDimSize = inputTensorInfo.GetNumDimensions();
4050 if (inputDimSize > MaxNumOfTensorDimensions)
4051 {
4052 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004053 fmt::format("The number of dimensions: {} for input tensors of the "
4054 "SplitV op cannot be greater than {} {}",
4055 inputTensorInfo.GetNumDimensions(),
4056 MaxNumOfTensorDimensions,
4057 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01004058 }
4059
4060 // Get split axis
4061 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004062 if (axisBufferPtr == nullptr)
4063 {
4064 throw ParseException(
4065 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
4066 CHECK_LOCATION().AsString()));
4067 }
4068
Derek Lambertif0176992020-04-28 13:37:49 +01004069 std::vector<int> axisData(axisTensorInfo.GetNumElements());
4070 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004071 int32_t axis = axisData[0];
4072
4073 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
4074 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
4075 {
4076 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
4077 // E.g. Rank 4 tensor can have axis in range [-4, 3)
4078 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
4079 throw ParseException(
4080 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
4081 axis,
4082 CHECK_LOCATION().AsString()));
4083 }
4084 const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
Derek Lambertif0176992020-04-28 13:37:49 +01004085
Derek Lambertif0176992020-04-28 13:37:49 +01004086 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01004087 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01004088 unsigned int numSplits{0};
4089
4090 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01004091 {
4092 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01004093 }
4094 else
4095 {
Ryan OShea86704732020-05-26 11:41:04 +01004096 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01004097 }
4098
4099 if (numSplits <=0)
4100 {
4101 throw ParseException("SplitV has invalid number of splits");
4102 }
4103
Jan Eilersc0761e92020-06-29 16:48:44 +01004104 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01004105 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01004106 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01004107
Jan Eilersc0761e92020-06-29 16:48:44 +01004108 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01004109 int numInferred{0};
4110 unsigned int inferIdx{0};
4111 int splitSum{0};
4112 for (auto split : splitsData)
4113 {
4114 if (split < 0)
4115 {
4116 numInferred++;
4117 inferIdx = idx;
4118 }
4119 else
4120 {
4121 splitSum += split;
4122 }
4123 idx++;
4124 }
4125 // Check for inferred Axis
4126 if (numInferred == 0)
4127 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004128 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01004129 {
4130 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
4131 }
4132 }
4133 else if (numInferred == 1)
4134 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004135 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01004136 }
4137 else
4138 {
4139 throw ParseException("Cannot infer split size for more than one split");
4140 }
4141
Derek Lambertif0176992020-04-28 13:37:49 +01004142 //Ouput size validation
4143 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4144 CHECK_VALID_SIZE(outputs.size(), numSplits);
4145
4146 // Setup Armnn descriptor
4147 SplitterDescriptor splitDesc(numSplits, inputDimSize);
4148 unsigned int accumSplit = 0;
4149 for (unsigned int j = 0; j < numSplits; ++j)
4150 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004151 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01004152
4153 // Set the size of the views.
4154 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
4155 {
4156 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
4157 if (dimIdx == splitDim)
4158 {
4159 dimSize = splitSize;
4160 }
4161 splitDesc.SetViewSize(j, dimIdx, dimSize);
4162 }
4163
4164 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
4165 accumSplit += splitSize;
4166 }
4167
James Ward58dec6b2020-09-11 17:32:44 +01004168 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01004169 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01004170 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01004171
4172 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4173 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4174
4175 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4176 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01004177 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01004178 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
4179 }
4180
4181 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4182 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4183}
4184
Matthew Sloyan28f177c2021-04-09 14:38:52 +01004185void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
4186{
4187 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
4188}
4189
Kevin May7d96b162021-02-03 17:38:41 +00004190void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
Inki Daed4619e22020-09-10 15:33:54 +09004191{
Matthew Sloyan28f177c2021-04-09 14:38:52 +01004192 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
4193}
4194
/// Shared handler for TFLite ARG_MIN / ARG_MAX: adds an armnn ArgMinMax layer.
///
/// Operator inputs: [0] the value tensor, [1] a constant axis tensor holding a
/// single element (asserted below). The output tensor type must be Signed32 or
/// Signed64.
///
/// @param argMinMaxFunction Selects Min or Max behaviour and the layer name prefix.
/// @throws ParseException on unsupported output type, unreadable axis buffer,
///         or out-of-range axis.
void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    // The axis input must be a scalar (exactly one element).
    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);

    // Check if output tensor type is Signed32 or Signed64
    if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
        outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
    {
        throw ParseException(
            fmt::format(
                "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
                CHECK_LOCATION().AsString()));
    }

    // Get const axis value from model and set it to descriptor.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData.front();

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    ArgMinMaxDescriptor desc;
    desc.m_Axis = axis;
    desc.m_Function = argMinMaxFunction;

    // Register a ArgMin/ArgMax layer.
    auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
    auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
    IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);
    // Output shape may depend on the (possibly dynamic) input shape, so derive it
    // from the connected inputs rather than from the model's static description.
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
4264
Kevin May7d96b162021-02-03 17:38:41 +00004265void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00004266{
4267 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4268
Kevin May7d96b162021-02-03 17:38:41 +00004269 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004270 CHECK_VALID_SIZE(inputs.size(), 2);
Kevin May7d96b162021-02-03 17:38:41 +00004271 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004272 CHECK_VALID_SIZE(outputs.size(), 1);
4273
Mike Kelly377fb212023-01-10 15:55:28 +00004274 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4275 armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
4276 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
Sadik Armagan26868492021-01-22 14:25:31 +00004277
4278 armnn::GatherDescriptor gatherDescriptor;
4279
Mike Kelly0d77ae12022-01-07 17:42:27 +00004280 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4281 const auto* options = operatorPtr->builtin_options.AsGatherOptions();
Sadik Armagan26868492021-01-22 14:25:31 +00004282 auto axis = options->axis;
4283
Mike Kelly377fb212023-01-10 15:55:28 +00004284 auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);
4285
Sadik Armagan26868492021-01-22 14:25:31 +00004286 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
4287 auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
4288 auto outputDimensions = outputTensorInfo.GetNumDimensions();
4289 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
4290 {
4291 throw ParseException(
4292 fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
4293 axis,
4294 inputDimensions, inputDimensions,
4295 CHECK_LOCATION().AsString()));
4296 }
4297 if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
4298 {
4299 throw ParseException(
4300 fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
4301 outputDimensions,
4302 inputDimensions, indicesDimensions,
4303 CHECK_LOCATION().AsString()));
4304 }
4305
4306 gatherDescriptor.m_Axis = axis;
4307
Sadik Armagan26868492021-01-22 14:25:31 +00004308 IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
4309 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00004310 outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Sadik Armagan26868492021-01-22 14:25:31 +00004311 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4312
4313 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4314 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
4315
4316 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4317 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4318}
4319
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004320void TfLiteParserImpl::ParseGatherNd(size_t subgraphIndex, size_t operatorIndex)
4321{
4322 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4323
4324 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4325 CHECK_VALID_SIZE(inputs.size(), 2);
4326 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4327 CHECK_VALID_SIZE(outputs.size(), 1);
4328
Mike Kelly377fb212023-01-10 15:55:28 +00004329 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4330 armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004331
4332 auto layerName = fmt::format("GatherNd:{}:{}", subgraphIndex, operatorIndex);
4333 IConnectableLayer* layer = m_Network->AddGatherNdLayer(layerName.c_str());
4334 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00004335 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004336 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4337
4338 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4339 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
4340
4341 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4342 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4343}
4344
Kevin May7d96b162021-02-03 17:38:41 +00004345void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00004346{
4347 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4348
Kevin May7d96b162021-02-03 17:38:41 +00004349 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004350 CHECK_VALID_SIZE(inputs.size(), 1);
Kevin May7d96b162021-02-03 17:38:41 +00004351 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004352 CHECK_VALID_SIZE(outputs.size(), 1);
4353
4354 armnn::DepthToSpaceDescriptor descriptor;
4355
Mike Kelly0d77ae12022-01-07 17:42:27 +00004356 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4357 const auto* options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
Sadik Armagan26868492021-01-22 14:25:31 +00004358 auto blockSize = options->block_size;
4359 if (blockSize < 2)
4360 {
4361 throw ParseException(
4362 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
4363 blockSize,
4364 CHECK_LOCATION().AsString()));
4365 }
4366 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
4367
4368 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
4369 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
4370 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00004371 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan26868492021-01-22 14:25:31 +00004372 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4373
4374 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4375 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4376
4377 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4378 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4379}
4380
Kevin May7d96b162021-02-03 17:38:41 +00004381void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004382{
Sadik Armagana2747482021-02-09 10:28:54 +00004383 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
4384}
4385
Teresa Charlin4e3e8312021-08-05 12:34:37 +01004386void TfLiteParserImpl::ParseReduceProd(size_t subgraphIndex, size_t operatorIndex)
4387{
4388 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Prod);
4389}
4390
Sadik Armagana2747482021-02-09 10:28:54 +00004391void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
4392{
4393 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
4394}
4395
/// Parses a TFLite REDUCE_MIN operator by delegating to the shared Reduce handler.
void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
}
4400
/// Shared handler for TFLite reduction operators (SUM, REDUCE_PROD/MAX/MIN):
/// adds an armnn Reduce layer configured with @p reduceOperation.
///
/// Operator inputs: [0] the tensor to reduce, [1] the axes tensor. If the axes
/// buffer is constant, its (deduplicated, wrapped-to-positive) values are used;
/// otherwise every dimension of the input is reduced.
void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsReducerOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo0 = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo inputTensorInfo1 = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    ReduceDescriptor desc;
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    // Get const axis value from model and set it to descriptor.
    if (axisBufferPtr != nullptr)
    {
        std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
        ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());

        // Convert the axis to unsigned int and remove duplicates.
        // (i + rank) % rank maps negative axes to their positive equivalent.
        auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
        std::set<unsigned int> uniqueAxis;
        std::transform(axisData.begin(),
                       axisData.end(),
                       std::inserter(uniqueAxis, uniqueAxis.begin()),
                       [rank](int i)->unsigned int{
                           return static_cast<uint32_t>(((i + rank) % rank)); });
        desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
    }
    else
    {
        // No constant axes available: reduce over every dimension.
        for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
        {
            desc.m_vAxis.push_back(i);
        }
    }

    desc.m_KeepDims = options->keep_dims;
    desc.m_ReduceOperation = reduceOperation;

    // Register the Reduce layer (the concrete reduction is selected by the descriptor).
    IConnectableLayer* layer = m_Network->AddReduceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
4462
Mike Kelly31dce2b2021-09-01 21:22:37 +01004463void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex)
4464{
4465 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4466
4467 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4468 CHECK_VALID_SIZE(inputs.size(), 1);
4469
4470 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4471 CHECK_VALID_SIZE(outputs.size(), 1);
4472
4473 auto layerName = fmt::format("LRN:{}:{}", subgraphIndex, operatorIndex);
4474 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4475
Mike Kelly377fb212023-01-10 15:55:28 +00004476 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly31dce2b2021-09-01 21:22:37 +01004477
4478 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4479 const auto* options = operatorPtr->builtin_options.AsLocalResponseNormalizationOptions();
4480
4481 armnn::NormalizationDescriptor descriptor;
4482 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4483 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
4484 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
4485 descriptor.m_NormSize = static_cast<uint32_t>(options->radius);
4486 descriptor.m_K = options->bias;
4487 descriptor.m_Alpha = options->alpha;
4488 descriptor.m_Beta = options->beta;
4489
4490 // ArmNN expects normSize to be the full size of the normalization
4491 // window rather than the radius as in TfLite.
4492 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
4493
4494 IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
4495 ARMNN_ASSERT(layer != nullptr);
4496
Mike Kelly377fb212023-01-10 15:55:28 +00004497 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Mike Kelly31dce2b2021-09-01 21:22:37 +01004498 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4499
4500 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4501 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4502
4503 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4504 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4505}
4506
Teresa Charlin28aa6692022-07-12 11:18:44 +01004507void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
4508{
4509 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
4510}
4511
/// Parses a TFLite EXP operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
}
4516
/// Parses a TFLite LOG operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseLog(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Log);
}
4521
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004522void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
4523{
4524 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
4525}
4526
/// Parses a TFLite NEG operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
}
4531
/// Parses a TFLite RSQRT operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
}
4536
Teresa Charlin28aa6692022-07-12 11:18:44 +01004537void TfLiteParserImpl::ParseSin(size_t subgraphIndex, size_t operatorIndex)
4538{
4539 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sin);
4540}
4541
Teresa Charlinf0fce5b2022-05-04 17:24:43 +01004542void TfLiteParserImpl::ParseSqrt(size_t subgraphIndex, size_t operatorIndex)
4543{
4544 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sqrt);
4545}
4546
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004547void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
4548{
4549 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4550
4551 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4552 CHECK_VALID_SIZE(inputs.size(), 1);
4553
4554 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4555 CHECK_VALID_SIZE(outputs.size(), 1);
4556
4557 std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
4558 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4559
4560 ElementwiseUnaryDescriptor desc;
4561 desc.m_Operation = unaryOperation;
4562 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
4563 ARMNN_ASSERT(layer != nullptr);
4564
Mike Kelly377fb212023-01-10 15:55:28 +00004565 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004566 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4567
4568 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4569 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4570
4571 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4572 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4573}
4574
Bruno Goncalves2d0eb862021-07-11 14:10:15 -03004575void TfLiteParserImpl::ParseEqual(size_t subgraphIndex, size_t operatorIndex)
4576{
4577 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Equal);
4578}
4579
/// Parses a TFLite NOT_EQUAL operator via the shared comparison handler.
void TfLiteParserImpl::ParseNotEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::NotEqual);
}
4584
/// Parses a TFLite GREATER operator via the shared comparison handler.
void TfLiteParserImpl::ParseGreater(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Greater);
}
4589
/// Parses a TFLite GREATER_EQUAL operator via the shared comparison handler.
void TfLiteParserImpl::ParseGreaterOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::GreaterOrEqual);
}
4594
/// Parses a TFLite LESS operator via the shared comparison handler.
void TfLiteParserImpl::ParseLess(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Less);
}
4599
/// Parses a TFLite LESS_EQUAL operator via the shared comparison handler.
void TfLiteParserImpl::ParseLessOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::LessOrEqual);
}
4604
/// Shared handler for TFLite comparison operators (EQUAL, LESS, GREATER, ...):
/// adds an armnn Comparison layer configured with @p comparisonOperation.
/// Both inputs must have matching quantization parameters.
void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorIndex,
                                       ComparisonOperation comparisonOperation)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Layer name is "<Op>:<subgraph>:<operator>"; the pattern string still holds
    // the "{}" placeholders, which the second call fills in.
    auto layerName = std::string(GetComparisonOperationAsCString(comparisonOperation)) + ":{}:{}";
    std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerNameFormatted, "Input 0", "Input 1");

    ComparisonDescriptor desc;
    desc.m_Operation = comparisonOperation;
    IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Output info is derived from both connected inputs (broadcasting may apply).
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
4637
Mike Kelly04d82292023-01-19 18:29:40 +00004638armnn::IConnectableLayer* TfLiteParserImpl::AddReshapeLayer(armnn::IConnectableLayer* layer,
4639 unsigned int outputSlot,
4640 std::string reshapeLayerName,
4641 armnn::TensorInfo outputShape)
4642{
4643 ReshapeDescriptor desc;
4644 desc.m_TargetShape = outputShape.GetShape();
4645
4646 IConnectableLayer* reshapeLayer =
4647 m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
4648
4649 auto & prevOutputSlot = layer->GetOutputSlot(outputSlot);
4650 prevOutputSlot.Connect(reshapeLayer->GetInputSlot(0));
4651 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputShape);
4652 return reshapeLayer;
4653}
4654
Kevin May7d96b162021-02-03 17:38:41 +00004655armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
4656 unsigned int outputSlot,
4657 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01004658{
4659 ActivationDescriptor activationDesc;
4660 std::string layerName = prevLayer->GetName();
4661
4662 switch(activationType)
4663 {
4664 case tflite::ActivationFunctionType_NONE:
4665 {
4666 // this is a no-op: return previous layer
4667 return prevLayer;
4668 }
4669 case tflite::ActivationFunctionType_RELU:
4670 {
4671 activationDesc.m_Function = ActivationFunction::ReLu;
4672 layerName += ":RELU";
4673 break;
4674 }
4675 case tflite::ActivationFunctionType_RELU6:
4676 {
4677 activationDesc.m_Function = ActivationFunction::BoundedReLu;
4678 activationDesc.m_A = 6.0f;
4679 activationDesc.m_B = 0.0f;
4680 layerName += ":RELU6";
4681 break;
4682 }
4683 case tflite::ActivationFunctionType_TANH:
4684 {
4685 activationDesc.m_Function = ActivationFunction::TanH;
4686 activationDesc.m_A = 1.0f;
4687 activationDesc.m_B = 1.0f;
4688 layerName += ":TANH";
4689 break;
4690 }
4691
4692 // I only put these here as a reminder what others we could support
4693 case tflite::ActivationFunctionType_RELU_N1_TO_1:
4694 case tflite::ActivationFunctionType_SIGN_BIT:
4695 default:
4696 {
4697 throw ParseException(
Mike Kelly377fb212023-01-10 15:55:28 +00004698 fmt::format("TfLite parser doesn't support fused activation: "
James Ward58dec6b2020-09-11 17:32:44 +01004699 "{}/{} {} ",
4700 activationType,
4701 tflite::EnumNameActivationFunctionType(activationType),
4702 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004703
4704 }
4705 }
4706
4707 IConnectableLayer* activationLayer =
4708 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
4709
4710 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
4711 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
4712 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
4713 return activationLayer;
4714}
4715
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004716armnn::IConnectableLayer* TfLiteParserImpl::AddFusedFloorLayer(armnn::IConnectableLayer* prevLayer,
4717 unsigned int outputSlot)
4718{
Teresa Charlin725728e2022-05-05 13:33:33 +01004719
4720 auto& prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
4721 DataType dataType = prevOutputSlot.GetTensorInfo().GetDataType();
4722
4723 if (dataType == DataType::Signed32)
4724 {
4725 return prevLayer;
4726 }
4727
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004728 std::string layerName = prevLayer->GetName();
4729 IConnectableLayer* floorLayer = m_Network->AddFloorLayer(layerName.c_str());
4730
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004731 prevOutputSlot.Connect(floorLayer->GetInputSlot(0));
4732 floorLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
Teresa Charlin725728e2022-05-05 13:33:33 +01004733
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004734 return floorLayer;
4735}
4736
Mike Kelly0d77ae12022-01-07 17:42:27 +00004737TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char* fileName)
telsoa01c577f2c2018-08-31 09:22:23 +01004738{
4739 if (fileName == nullptr)
4740 {
James Ward58dec6b2020-09-11 17:32:44 +01004741 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01004742 CHECK_LOCATION().AsString()));
4743 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01004744 std::error_code errorCode;
4745 fs::path pathToFile(fileName);
4746 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01004747 {
James Ward58dec6b2020-09-11 17:32:44 +01004748 //fmt::format() could not be used here (format error)
4749 std::stringstream msg;
4750 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
4751 << " " << CHECK_LOCATION().AsString();
4752
4753 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01004754 }
4755 std::ifstream file(fileName, std::ios::binary);
4756 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
4757 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
4758 fileContent.size());
4759}
4760
// Verifies that 'binaryContent' holds a well-formed TfLite flatbuffer and
// unpacks it into the flatbuffers object API representation.
// Throws InvalidArgumentException on a null pointer and ParseException when the
// buffer fails flatbuffers verification.
TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t* binaryContent, size_t len)
{
    if (binaryContent == nullptr)
    {
        throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
                                       CHECK_LOCATION().AsString()));
    }
    // Bounds/offset-check the whole flatbuffer before touching any of its tables.
    flatbuffers::Verifier verifier(binaryContent, len);
    if (verifier.VerifyBuffer<tflite::Model>() == false)
    {
        throw ParseException(
            fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
                        "flatbuffers format. size:{} {}",
                        len,
                        CHECK_LOCATION().AsString()));
    }
    return tflite::UnPackModel(binaryContent);
}
4779
Mike Kelly0d77ae12022-01-07 17:42:27 +00004780TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004781 size_t subgraphIndex,
4782 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004783{
4784 CHECK_MODEL(model, subgraphIndex, operatorIndex);
4785
Mike Kelly0d77ae12022-01-07 17:42:27 +00004786 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4787 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004788
4789 size_t inputCount = operatorPtr->inputs.size();
mathad01c21025d2021-04-26 10:09:37 +01004790 TensorRawPtrVector result;
Mike Kelly0d77ae12022-01-07 17:42:27 +00004791 for (size_t i = 0; i < inputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004792 {
mathad01c21025d2021-04-26 10:09:37 +01004793 // If the input location is -1 then assume input is turned off.
4794 if (operatorPtr->inputs[i] == -1)
4795 {
4796 continue;
4797 }
4798 else
4799 {
4800 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
4801 result.push_back(subgraphPtr->tensors[inputId].get());
4802 }
telsoa01c577f2c2018-08-31 09:22:23 +01004803 }
4804 return result;
4805}
4806
Mike Kelly0d77ae12022-01-07 17:42:27 +00004807TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004808 size_t subgraphIndex,
4809 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004810{
4811 CHECK_MODEL(model, subgraphIndex, operatorIndex);
4812
Mike Kelly0d77ae12022-01-07 17:42:27 +00004813 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4814 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004815
4816 size_t outputCount = operatorPtr->outputs.size();
4817 TensorRawPtrVector result(outputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004818 for (size_t i = 0; i < outputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004819 {
4820 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
4821 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01004822 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01004823 }
4824 return result;
4825}
4826
Mike Kelly0d77ae12022-01-07 17:42:27 +00004827TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004828 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004829{
4830 CHECK_SUBGRAPH(model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004831 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004832
Derek Lambertiff05cc52019-04-26 13:05:17 +01004833 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01004834 TensorIdRawPtrVector result(inputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004835 for (size_t i = 0; i < inputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004836 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01004837 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01004838 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01004839 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01004840 }
4841 return result;
4842}
4843
Mike Kelly0d77ae12022-01-07 17:42:27 +00004844TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004845 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004846{
4847 CHECK_SUBGRAPH(model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004848 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004849
Derek Lambertiff05cc52019-04-26 13:05:17 +01004850 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01004851 TensorIdRawPtrVector result(outputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004852 for (size_t i = 0; i < outputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004853 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01004854 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
4855 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01004856 }
4857 return result;
4858}
4859
Kevin May7d96b162021-02-03 17:38:41 +00004860std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
4861 size_t subgraphIndex,
4862 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004863{
4864 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004865 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4866 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004867 return operatorPtr->inputs;
4868}
4869
Kevin May7d96b162021-02-03 17:38:41 +00004870std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
4871 size_t subgraphIndex,
4872 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004873{
4874 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004875 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4876 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004877 return operatorPtr->outputs;
4878}
4879
// Records, for each tensor index, that the corresponding input slot of 'layer'
// consumes that tensor. Actual graph connections are made later, once the
// producer of each tensor is known. 'startingSlotIndex' lets callers skip
// slots that were already wired up (e.g. weights connected as constant layers).
// Throws ParseException when the tensor count does not match the layer's
// remaining input slots.
void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
                                          size_t operatorIndex,
                                          IConnectableLayer* layer,
                                          const std::vector<unsigned int>& tensorIndexes,
                                          unsigned int startingSlotIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    ARMNN_ASSERT(layer != nullptr);

    if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
    {
        throw ParseException(
            fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
                        " for subgraph:{} operator index:{} {}",
                        tensorIndexes.size(),
                        layer->GetNumInputSlots(),
                        subgraphIndex,
                        operatorIndex,
                        CHECK_LOCATION().AsString()));
    }

    // Register each slot as a pending consumer of its tensor.
    for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
    {
        unsigned int tensorIndex = tensorIndexes[index];
        armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
        RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}
4908
// Records that each output slot of 'layer' produces the tensor at the matching
// index in 'tensorIndexes'. Registering a producer also triggers connection of
// any consumers that were registered earlier for the same tensor.
// Throws ParseException when the tensor count does not match the layer's
// output slot count.
void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
                                           size_t operatorIndex,
                                           IConnectableLayer* layer,
                                           const std::vector<unsigned int>& tensorIndexes)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    ARMNN_ASSERT(layer != nullptr);
    if (tensorIndexes.size() != layer->GetNumOutputSlots())
    {
        throw ParseException(
            fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
                        " for subgraph:{} operator index:{} {}",
                        tensorIndexes.size(),
                        layer->GetNumOutputSlots(),
                        subgraphIndex,
                        operatorIndex,
                        CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
    {
        unsigned int tensorIndex = tensorIndexes[slotIndex];
        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
        RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}
4935
Mike Kelly377fb212023-01-10 15:55:28 +00004936void TfLiteParserImpl::SetupInputLayerTensorInfos(size_t subgraphIndex)
4937{
4938 CHECK_SUBGRAPH(m_Model, subgraphIndex);
4939
4940 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
4941 for (auto const& tensorIdAndPtr : inputs)
4942 {
4943 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
4944 m_TensorInfos.insert({tensorIdAndPtr.first, tensorInfo});
4945 }
4946}
4947
Kevin May7d96b162021-02-03 17:38:41 +00004948void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004949{
4950 CHECK_SUBGRAPH(m_Model, subgraphIndex);
4951
4952 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004953 for (auto const& tensorIdAndPtr : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01004954 {
4955 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
4956 IConnectableLayer* layer =
4957 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
4958
4959 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
4960 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
4961
4962 RegisterOutputSlots(subgraphIndex,
4963 VIRTUAL_OPERATOR_ID,
4964 layer,
4965 { static_cast<uint32_t>(tensorIdAndPtr.first) });
4966 }
4967}
4968
Kevin May7d96b162021-02-03 17:38:41 +00004969void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004970{
4971 CHECK_SUBGRAPH(m_Model, subgraphIndex);
4972
4973 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004974 for (auto const& tensorIdAndPtr : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01004975 {
4976 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
4977 IConnectableLayer* layer =
4978 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
4979
4980 RegisterInputSlots(subgraphIndex,
4981 VIRTUAL_OPERATOR_ID,
4982 layer,
4983 { static_cast<uint32_t>(tensorIdAndPtr.first) });
4984 }
4985}
4986
Mike Kelly377fb212023-01-10 15:55:28 +00004987void TfLiteParserImpl::SetupConstantLayerTensorInfos(size_t subgraph)
4988{
4989 CHECK_SUBGRAPH(m_Model, subgraph);
4990
4991 const auto & subgraphPtr = m_Model->subgraphs[subgraph];
4992 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
4993 {
4994 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
4995 {
4996 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
4997 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
4998 {
4999 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
5000
5001 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5002
5003 m_TensorInfos.insert({tensorIndex, tensorInfo});
5004 }
5005 }
5006 }
5007}
5008
// Creates ConstantLayers for every tensor that is consumed by some operator but
// never produced by one. Three cases:
//  - tensor has buffer data         -> constant layer wrapping that data,
//  - tensor was flagged for creation -> zero-filled constant of the right size,
//  - otherwise                       -> the model is malformed (ParseException).
// Each new layer is registered as the tensor's producer so pending consumer
// slots get connected.
void TfLiteParserImpl::SetupConstantLayers(size_t subgraph)
{
    CHECK_SUBGRAPH(m_Model, subgraph);

    const auto & subgraphPtr = m_Model->subgraphs[subgraph];
    for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // Consumers registered but no producer: this tensor must be a constant.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
                m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
            {
                TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();

                if (IsConstTensor(tensorPtr))
                {
                    armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
                    armnn::DataType dataType = tensorInfo.GetDataType();

                    // Buffers flagged for dequantization must be materialised as Float32.
                    if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
                        != m_ConstantsToDequantize.end())
                    {
                        dataType = DataType::Float32;
                    }
                    auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo, dataType);

                    std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
                    IConnectableLayer *layer = m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

                    layer->GetOutputSlot(0).SetTensorInfo(tensorAndData.first.GetInfo());
                    RegisterOutputSlots(subgraphIndex,
                                        VIRTUAL_OPERATOR_ID,
                                        layer,
                                        { tensorIndex });
                }
                else if (ShouldConstantTensorBeCreated(tensorIndex))
                {
                    // No data in the model: synthesise a zero-filled constant of the
                    // correct byte size (e.g. for optional LSTM inputs).
                    armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
                    armnn::DataType dataType = tensorInfo.GetDataType();

                    if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
                        != m_ConstantsToDequantize.end())
                    {
                        dataType = DataType::Float32;
                    }
                    // Make sure isConstant flag is set.
                    tensorInfo.SetConstant();
                    tensorInfo.SetDataType(dataType);

                    auto tensorAndData = ConstTensor(tensorInfo, std::vector<uint8_t>(tensorInfo.GetNumBytes()));

                    std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
                    IConnectableLayer* layer = m_Network->AddConstantLayer(tensorAndData, layerName.c_str());

                    layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
                    RegisterOutputSlots(subgraphIndex,
                                        VIRTUAL_OPERATOR_ID,
                                        layer,
                                        {tensorIndex});
                }
                else
                {
                    throw ParseException(
                        fmt::format("Invalid Tensor: Tensor should be constant. {}",
                                    CHECK_LOCATION().AsString()));
                }
            }
        }
    }
}
5079
telsoa01c577f2c2018-08-31 09:22:23 +01005080// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Kevin May7d96b162021-02-03 17:38:41 +00005081TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005082{
5083 CHECK_BUFFER(model, bufferIndex);
5084 return model->buffers[bufferIndex].get();
5085}
5086
// Creates a ConstTensor of element type T from the given buffer, optionally
// permuting the data (e.g. for weight layout conversion), and wraps the backing
// allocation in a SupportedDataStorage so the data outlives the returned tensor.
template<typename T>
std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
                                                TfLiteParserImpl::TensorRawPtr tensorPtr,
                                                armnn::TensorInfo& tensorInfo,
                                                armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    auto constData = CreateConstTensorImpl<T>(bufferPtr,
                                              tensorPtr,
                                              tensorInfo,
                                              permutationVector);
    // Take ownership of the (possibly permuted) copy of the data.
    TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
    return std::make_pair(constData.first, std::move(storage));
}
5104
Mike Kelly5880b912022-01-28 16:18:54 +00005105bool TfLiteParserImpl::ShouldConstantTensorBeCreated(unsigned int tensorIndex)
5106{
5107 // If the TensorIndex appears in the list of ConstantsToBeCreated then return true
5108 return (std::find(m_ConstantsToBeCreated.begin(), m_ConstantsToBeCreated.end(), tensorIndex)
5109 != m_ConstantsToBeCreated.end());
5110}
5111
Finn Williamsd4fa5452021-03-01 12:31:41 +00005112bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
5113{
5114 CHECK_TENSOR_PTR(tensorPtr);
mathad01bf7edb62021-04-20 16:12:45 +01005115 bool isConst = true;
5116
5117 auto buffer = GetBuffer(m_Model, tensorPtr->buffer);
5118 if (buffer->data.size() == 0)
5119 {
5120 isConst = false;
5121 }
5122
5123 return isConst;
Finn Williamsd4fa5452021-03-01 12:31:41 +00005124}
5125
// Creates a ConstTensor (with owned storage) from the tensor's buffer,
// dispatching on the tensor's data type and optionally permuting the data.
// Throws ParseException for data types without a supported storage variant.
std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
                                            armnn::TensorInfo& tensorInfo,
                                            armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    CHECK_TENSOR_PTR(tensorPtr);
    auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
    CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    // Dispatch on element type; each case stores the data with the matching C++ type.
    switch (tensorInfo.GetDataType())
    {
        case armnn::DataType::Float32:
            return CreateConstTensorAndStoreData<float>(bufferPtr,
                                                        tensorPtr,
                                                        tensorInfo,
                                                        permutationVector);
        case armnn::DataType::QAsymmU8:
            return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        case armnn::DataType::QSymmS8:
            return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
                                                         tensorPtr,
                                                         tensorInfo,
                                                         permutationVector);
        case armnn::DataType::QAsymmS8:
            return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
                                                         tensorPtr,
                                                         tensorInfo,
                                                         permutationVector);
        case armnn::DataType::Signed32:
            return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        default:
        {
            std::stringstream errString;
            errString << "Unexpected datatype when creating const tensor: "
                        << armnn::GetDataTypeName(tensorInfo.GetDataType())
                        << " shape:" << tensorInfo.GetShape()
                        << CHECK_LOCATION().AsString();
            throw ParseException(errString.str());
        }
    }
}
5176
Finn Williamsd4fa5452021-03-01 12:31:41 +00005177armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5178 armnn::TensorInfo& tensorInfo)
5179{
5180 CHECK_TENSOR_PTR(tensorPtr);
5181 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5182 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5183
Matthew Sloyan81beae32021-07-13 19:46:11 +01005184 // Make sure isConstant flag is set.
5185 tensorInfo.SetConstant();
5186
Finn Williamsd4fa5452021-03-01 12:31:41 +00005187 return ConstTensor(tensorInfo, bufferPtr->data.data());
5188}
5189
Mike Kelly5880b912022-01-28 16:18:54 +00005190std::pair<armnn::ConstTensor, std::unique_ptr<float[]>>
5191TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5192 armnn::TensorInfo& tensorInfo,
5193 armnn::DataType inputDataType)
5194{
5195 CHECK_TENSOR_PTR(tensorPtr);
5196 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5197 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5198
5199 // Make sure isConstant flag is set.
5200 tensorInfo.SetConstant();
5201
Mike Kelly0506ef02023-01-03 16:29:44 +00005202 if (inputDataType == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
Mike Kelly5880b912022-01-28 16:18:54 +00005203 {
Mike Kelly0506ef02023-01-03 16:29:44 +00005204 try
5205 {
5206 TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5207 std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5208 return std::make_pair(ConstTensor(constTensorInfo, data.get()), std::move(data));
5209 }
Cathal Corbett9c843c32023-01-09 17:51:37 +00005210 catch (InvalidArgumentException&)
Mike Kelly0506ef02023-01-03 16:29:44 +00005211 {
5212 throw ParseException(
5213 fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5214 GetDataTypeName(DataType::Float32),
5215 GetDataTypeName(tensorInfo.GetDataType()),
5216 CHECK_LOCATION().AsString()));
5217 }
Mike Kelly5880b912022-01-28 16:18:54 +00005218 }
5219 else
5220 {
5221 return std::make_pair(ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5222 }
5223}
5224
5225std::pair<armnn::ConstTensor*, std::unique_ptr<float[]>>
5226TfLiteParserImpl::CreateConstTensorPtr(TensorRawPtr tensorPtr, armnn::TensorInfo& inputTensorInfo)
5227{
5228 CHECK_TENSOR_PTR(tensorPtr);
5229 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5230 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5231 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5232
5233 // Make sure isConstant flag is set.
5234 tensorInfo.SetConstant();
5235
5236 if (inputTensorInfo.GetDataType() == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
5237 {
Mike Kelly0506ef02023-01-03 16:29:44 +00005238 try
5239 {
5240 TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5241 std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5242 return std::make_pair(new ConstTensor(constTensorInfo, data.get()), std::move(data));
5243 }
Cathal Corbett9c843c32023-01-09 17:51:37 +00005244 catch (InvalidArgumentException&)
Mike Kelly0506ef02023-01-03 16:29:44 +00005245 {
5246 throw ParseException(
5247 fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5248 GetDataTypeName(DataType::Float32),
5249 GetDataTypeName(tensorInfo.GetDataType()),
5250 CHECK_LOCATION().AsString()));
5251 }
Mike Kelly5880b912022-01-28 16:18:54 +00005252 }
5253 else
5254 {
5255 return std::make_pair(new ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5256 }
5257}
5258
Kevin May7d96b162021-02-03 17:38:41 +00005259BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
5260 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01005261{
5262 CHECK_SUBGRAPH(m_Model, subgraphId);
5263 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005264 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005265 {
5266 if (input.second->name == name)
5267 {
5268 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
Colm Donelan4bc993b2021-11-09 20:39:10 +00005269 auto inputTensorInfo = ToTensorInfo(input.second);
5270 // Input tensors are always treated as constant tensors during network execution.
5271 inputTensorInfo.SetConstant(true);
5272 return std::make_pair(bindingId, inputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01005273 }
5274 }
5275
5276 std::stringstream bindings;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005277 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005278 {
5279 bindings << "'" << input.second->name << "' ";
5280 }
5281
5282 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005283 fmt::format("No input binding found for subgraph:{} and name:{}. "
5284 "Possible inputs are: [{}] {}",
5285 subgraphId,
5286 name,
5287 bindings.str(),
5288 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005289}
5290
// Looks up the (binding id, TensorInfo) pair for the subgraph output with the
// given tensor name. Throws ParseException (listing the available names) when
// no output matches.
BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                               const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        auto const output = outputs[i];
        if (output.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
            // Prefer an inferred/overridden shape when one was recorded during
            // parsing; m_OverriddenOutputShapes is indexed by output position, not
            // tensor id — presumably it is populated in subgraph-output order
            // (TODO confirm at the population site).
            std::vector<unsigned int> shape = m_OverriddenOutputShapes.size() > 0 ?
                                              m_OverriddenOutputShapes[i] : AsUnsignedVector(output.second->shape);
            return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
        }
    }

    // Not found: include every available output name in the error message.
    std::stringstream bindings;
    for (auto const& output : outputs)
    {
        bindings << "'" << output.second->name << "' ";
    }

    throw ParseException(
        fmt::format("No output binding found for subgraph:{} and name:{}. "
                    "Possible outputs are: [{}] {}",
                    subgraphId,
                    name,
                    bindings.str(),
                    CHECK_LOCATION().AsString()));
}
5322
// Returns the number of subgraphs contained in the loaded TfLite model.
size_t TfLiteParserImpl::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}
5327
Kevin May7d96b162021-02-03 17:38:41 +00005328std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01005329{
5330 CHECK_SUBGRAPH(m_Model, subgraphId);
5331 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
5332 std::vector<std::string> result;
5333 result.reserve(inputs.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +00005334 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005335 {
5336 result.push_back(input.second->name);
5337 }
5338 return result;
5339}
5340
Kevin May7d96b162021-02-03 17:38:41 +00005341std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01005342{
5343 CHECK_SUBGRAPH(m_Model, subgraphId);
5344 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
5345 std::vector<std::string> result;
5346 result.reserve(outputs.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +00005347 for (auto const& output : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005348 {
5349 result.push_back(output.second->name);
5350 }
5351 return result;
5352}
5353
// Returns the version string of the TfLite parser library (TFLITE_PARSER_VERSION).
const std::string TfLiteParserImpl::GetVersion()
{
    return TFLITE_PARSER_VERSION;
}
5358
// Takes ownership of float constant data; all other typed pointers stay null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]>&& data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
5366
// Takes ownership of uint8 constant data; all other typed pointers stay null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
5374
// Takes ownership of int8 constant data; all other typed pointers stay null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}
5382
// Takes ownership of int32 constant data; all other typed pointers stay null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
5390
5391} // armnnTfLiteParser