blob: 279f804a0340b02049fd9d323317440c4a3c0497 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kelly04d82292023-01-19 18:29:40 +00002// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfLiteParser/Version.hpp"
Mike Kelly5880b912022-01-28 16:18:54 +00009#include "armnn/LstmParams.hpp"
Matthew Sloyanac001ee2021-02-03 10:43:04 +000010
Sadik Armagand109a4d2020-07-28 10:42:13 +010011#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +000012#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010013#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000014#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010015#include <armnn/Tensor.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000016#include <armnnUtils/TensorUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010017#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010018#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000019#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010020#include <armnn/utility/NumericCast.hpp>
Mike Kelly377fb212023-01-10 15:55:28 +000021#include <armnn/LayerSupport.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010022
23// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000024#include <armnnUtils/Permute.hpp>
Rob Hughes9542f902021-07-14 09:48:54 +010025#include <armnnUtils/Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000026
Sadik Armagan479045b2018-10-01 11:51:37 +010027#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010028#include <VerificationHelpers.hpp>
29
30// The generated code based on the Tf Lite schema:
31#include <schema_generated.h>
32
Matteo Martincighe011d202019-11-28 11:35:47 +000033#include <flatbuffers/flexbuffers.h>
34
James Ward58dec6b2020-09-11 17:32:44 +010035#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010036
telsoa01c577f2c2018-08-31 09:22:23 +010037#include <algorithm>
Matthew Sloyanac001ee2021-02-03 10:43:04 +000038#include <iostream>
telsoa01c577f2c2018-08-31 09:22:23 +010039#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010040#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000041
42#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
43 { \
44 throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
45 << ": " \
46 << CHECK_LOCATION().AsString()).str()); \
47 }
telsoa01c577f2c2018-08-31 09:22:23 +010048
49using namespace armnn;
50using armnn::CheckLocation;
51namespace armnnTfLiteParser
52{
Kevin May7d96b162021-02-03 17:38:41 +000053
// ITfLiteParser is a pimpl facade; every call is forwarded to TfLiteParserImpl.
ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
    pTfLiteParserImpl(new TfLiteParserImpl(options)) {}

ITfLiteParser::~ITfLiteParser() = default;

// Creates a caller-owned parser; release it with ITfLiteParser::Destroy.
ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
{
    return new ITfLiteParser(options);
}

// Creates a parser wrapped in a smart pointer whose deleter is ITfLiteParser::Destroy.
ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
{
    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
}

// Deletes a parser previously obtained from CreateRaw.
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
73
// Parses a TfLite flatbuffer model from a file on disk and builds an armnn::INetwork.
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
}

// Parses a TfLite flatbuffer model already loaded into memory and builds an armnn::INetwork.
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
{
    return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
}
83
// Returns the binding info (layer binding id + TensorInfo) for a named input
// tensor of the given subgraph.
BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
}

// Returns the binding info for a named output tensor of the given subgraph.
BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                            const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
}

// Returns the number of subgraphs in the parsed model.
size_t ITfLiteParser::GetSubgraphCount() const
{
    return pTfLiteParserImpl->GetSubgraphCount();
}

// Returns the names of the input tensors of the given subgraph.
std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
}

// Returns the names of the output tensors of the given subgraph.
std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
}
110
telsoa01c577f2c2018-08-31 09:22:23 +0100111namespace
112{
jimfly01c25411c2018-11-14 17:47:22 +0000113
telsoa01c577f2c2018-08-31 09:22:23 +0100114const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
115
Mike Kelly0d77ae12022-01-07 17:42:27 +0000116void CheckSubgraph(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100117 size_t subgraphIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000118 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100119{
120 if (model.get() == nullptr)
121 {
122 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100123 fmt::format("{} was called with invalid (null) model. "
124 "Possible reason is that the model is not yet loaded and Unpack(ed). "
125 "subgraph:{} at {}",
126 location.m_Function,
127 subgraphIndex,
128 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100129 }
130 else if (subgraphIndex >= model->subgraphs.size())
131 {
132 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100133 fmt::format("{} was called with an invalid subgraph index. "
134 "subgraph:{} at {}",
135 location.m_Function,
136 subgraphIndex,
137 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100138 }
139}
140
141#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
142 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
143
Mike Kelly0d77ae12022-01-07 17:42:27 +0000144void CheckModel(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100145 size_t subgraphIndex,
146 size_t operatorIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000147 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100148{
149 if (model.get() == nullptr)
150 {
151 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100152 fmt::format("{} was called with invalid (null) model. "
153 "Possible reason is that the model is not yet loaded and Unpack(ed). "
154 "subgraph:{} operator:{} at {}",
155 location.m_Function,
156 subgraphIndex,
157 operatorIndex,
158 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100159 }
160 else if (subgraphIndex >= model->subgraphs.size())
161 {
162 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100163 fmt::format("{} was called with an invalid subgraph index. "
164 "subgraph:{} operator:{} at {}",
165 location.m_Function,
166 subgraphIndex,
167 operatorIndex,
168 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100169 }
170 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
171 operatorIndex != VIRTUAL_OPERATOR_ID)
172 {
173 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100174 fmt::format("{} was called with an invalid operator index. "
175 "subgraph:{} operator:{} at {}",
176 location.m_Function,
177 subgraphIndex,
178 operatorIndex,
179 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100180 }
181}
182
183#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
184 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
185
Mike Kelly0d77ae12022-01-07 17:42:27 +0000186void CheckTensor(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100187 size_t subgraphIndex,
188 size_t tensorIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000189 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100190{
191 // not checking model, because I assume CHECK_MODEL already run
192 // and checked that. An assert would do.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100193 ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
telsoa01c577f2c2018-08-31 09:22:23 +0100194
195 // also subgraph index should be checked by CHECK_MODEL so
196 // I only add an assert here
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100197 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
telsoa01c577f2c2018-08-31 09:22:23 +0100198
199 // the tensor index is the only one to check here
200 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
201 {
202 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100203 fmt::format("{} was called with an invalid tensor index. "
204 "subgraph:{} tensor:{} at {}",
205 location.m_Function,
206 subgraphIndex,
207 tensorIndex,
208 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100209 }
210}
211
212#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
213 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
214
Kevin May7d96b162021-02-03 17:38:41 +0000215void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000216 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100217{
218 if (rawPtr == nullptr)
219 {
220 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100221 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100222 }
223}
224
225#define CHECK_TENSOR_PTR(TENSOR_PTR) \
226 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
227
Mike Kelly0d77ae12022-01-07 17:42:27 +0000228void CheckBuffer(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100229 size_t bufferIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000230 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100231{
232 if (model.get() == nullptr)
233 {
234 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100235 fmt::format("{} was called with invalid (null) model. "
236 "Possible reason is that the model is not yet loaded and Unpack(ed). "
237 "buffer:{} at {}",
238 location.m_Function,
239 bufferIndex,
240 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100241 }
242 else if (bufferIndex >= model->buffers.size())
243 {
244 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100245 fmt::format("{} was called with an invalid buffer index. "
246 "buffer index:{} at {}",
247 location.m_Function,
248 bufferIndex,
249 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100250 }
251 else if (model->buffers[bufferIndex].get() == nullptr)
252 {
253 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100254 fmt::format("The buffer #{} is null. {}",
255 bufferIndex,
256 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100257 }
258}
259
260#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
261 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
262
Kevin May7d96b162021-02-03 17:38:41 +0000263void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000264 const armnn::TensorInfo& tensorInfo,
telsoa01c577f2c2018-08-31 09:22:23 +0100265 uint32_t bufferId,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000266 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100267{
268 if (bufferPtr == nullptr)
269 {
270 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100271 fmt::format("BufferPtr is null for buffer:{}. {}",
272 bufferId,
273 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100274 }
275 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
276 tensorInfo.GetNumBytes() > bufferPtr->data.size())
277 {
278 std::stringstream ss;
279 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
280 << "For tensor: " << tensorInfo.GetShape()
281 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
282 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
283 throw ParseException(ss.str());
284 }
285}
286
Mike Kelly0d77ae12022-01-07 17:42:27 +0000287
288tflite::BuiltinOperator GetOpCode(const TfLiteParserImpl::ModelPtr& model, size_t subgraphIndex, size_t operatorIndex)
289{
290 const auto& operatorPtr = model->subgraphs[subgraphIndex]->operators[operatorIndex];
291 auto opcodeIndex = operatorPtr->opcode_index;
292
293// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
294#if defined(ARMNN_POST_TFLITE_2_3)
295 auto opcode = std::max(model->operator_codes[opcodeIndex]->builtin_code,
296 static_cast<tflite::BuiltinOperator>(model->operator_codes[opcodeIndex]->deprecated_builtin_code));
297#else
298 auto opcode = model->operator_codes[opcodeIndex]->builtin_code;
299#endif
300 return opcode;
301}
302
303std::vector<unsigned int> GetUIntBuffer(armnn::TensorInfo info,
304 const TfLiteParserImpl::ModelPtr& model,
305 size_t bufferIndex)
306{
307 TfLiteParserImpl::BufferRawPtr bufferPtr = TfLiteParserImpl::GetBuffer(model, bufferIndex);
308 std::vector<unsigned int> buffer(info.GetNumElements());
309
310 if (info.GetDataType() == DataType::Signed32)
311 {
312 ::memcpy(buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
313 }
314 else if (info.GetDataType() == DataType::Signed64)
315 {
316 std::vector<uint64_t> uint64Buffer(info.GetNumElements());
317 ::memcpy(uint64Buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
318 buffer.assign(std::begin(uint64Buffer), std::end(uint64Buffer));
319 }
Mike Kelly0506ef02023-01-03 16:29:44 +0000320 else
321 {
322 CheckLocation location = CHECK_LOCATION();
323 throw ParseException(
324 fmt::format("Unsupported data type for uint buffer {}, only Signed 32 or Signed 64 are supported. {}",
325 GetDataTypeName(info.GetDataType()),
326 location.AsString()));
327 }
Mike Kelly0d77ae12022-01-07 17:42:27 +0000328 return buffer;
329}
330
telsoa01c577f2c2018-08-31 09:22:23 +0100331#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
332 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
333
334bool IsActivationSupported(tflite::ActivationFunctionType activationType)
335{
336 switch(activationType)
337 {
338 case tflite::ActivationFunctionType_NONE:
339 case tflite::ActivationFunctionType_RELU:
340 case tflite::ActivationFunctionType_RELU6:
341 case tflite::ActivationFunctionType_TANH:
342 {
343 return true;
344 }
345 default:
346 {
347 return false;
348 }
349 }
350}
351
352#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
353 do { \
354 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
355 { \
356 throw ParseException( \
Mike Kelly377fb212023-01-10 15:55:28 +0000357 fmt::format("TfLite parser doesn't support fused activation: " \
James Ward58dec6b2020-09-11 17:32:44 +0100358 "{}/{} in {} subgraph:{} operator:{} at {}", \
359 OPTION->fused_activation_function, \
360 tflite::EnumNameActivationFunctionType(\
361 OPTION->fused_activation_function), \
362 __func__, \
363 SUBGRAPH_INDEX, \
364 OPERATOR_INDEX, \
365 CHECK_LOCATION().FileLine())); \
telsoa01c577f2c2018-08-31 09:22:23 +0100366 } \
367 } while(false)
368
369
Mike Kelly0d77ae12022-01-07 17:42:27 +0000370std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t>& in)
telsoa01c577f2c2018-08-31 09:22:23 +0100371{
372 std::vector<unsigned int> result;
373 result.reserve(in.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +0000374 for (auto& i : in)
telsoa01c577f2c2018-08-31 09:22:23 +0100375 {
mathad01c21025d2021-04-26 10:09:37 +0100376 // If the location of the input data is -1 then the input should be ignored.
377 if (i == -1)
378 {
379 continue;
380 }
telsoa01c577f2c2018-08-31 09:22:23 +0100381 result.push_back(CHECKED_NON_NEGATIVE(i));
382 }
383 return result;
384}
385
// TfLite marks an absent optional operand with a negative tensor index;
// any non-negative index means the operand is present.
bool IsOptionalOperandPresent(int input)
{
    return !(input < 0);
}
390
telsoa01c577f2c2018-08-31 09:22:23 +0100391void CalcPadding(uint32_t inputSize,
392 uint32_t filterSize,
393 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100394 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100395 uint32_t& paddingFront,
396 uint32_t& paddingBack,
397 tflite::Padding padding)
398{
399 paddingFront = 0;
400 paddingBack = 0;
401 if (padding == tflite::Padding_SAME)
402 {
403 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100404 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
405 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100406 if (temp > inputSize)
407 {
408 paddingFront = (temp - inputSize) / 2;
409 paddingBack = (temp - inputSize) - paddingFront;
410 }
411 }
412}
413
Kevin May7d96b162021-02-03 17:38:41 +0000414armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Finn Williamsb49ed182021-06-29 15:50:08 +0100415 const std::vector<unsigned int>& shape,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100416 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100417{
418 armnn::DataType type;
419 CHECK_TENSOR_PTR(tensorPtr);
420
421 switch (tensorPtr->type)
422 {
423 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000424 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100425 break;
426 case tflite::TensorType_FLOAT32:
427 type = armnn::DataType::Float32;
428 break;
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100429 case tflite::TensorType_FLOAT16:
430 type = armnn::DataType::Float16;
431 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000432 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000433 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000434 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000435 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000436 type = armnn::DataType::QAsymmS8;
437 }
438 else
439 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000440 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000441 type = armnn::DataType::QSymmS8;
442 }
Finn Williamsed66d142019-12-06 09:55:55 +0000443 break;
444 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000445 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000446 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100447 case tflite::TensorType_INT32:
448 type = armnn::DataType::Signed32;
449 break;
Inki Daed4619e22020-09-10 15:33:54 +0900450 case tflite::TensorType_INT64:
451 type = armnn::DataType::Signed64;
452 break;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100453 case tflite::TensorType_BOOL:
454 type = armnn::DataType::Boolean;
455 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100456 default:
457 {
458 CheckLocation location = CHECK_LOCATION();
459 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100460 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
461 tensorPtr->type,
462 tflite::EnumNameTensorType(tensorPtr->type),
463 tensorPtr->name,
464 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100465 }
466 }
Finn Williamsb49ed182021-06-29 15:50:08 +0100467 TensorShape tensorShape;
468
469 std::vector<unsigned int> safeShape = shape;
470 if (shape.size() == 0)
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100471 {
472 safeShape.push_back(1);
Finn Williamsb49ed182021-06-29 15:50:08 +0100473 }
474
475 if (!outputTensor)
476 {
477 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()), safeShape.data());
478 }
479 else
480 {
Rob Hughesd812a312021-08-06 13:10:53 +0100481 size_t shapeSignatureSize = tensorPtr->shape_signature.size();
Finn Williamsb49ed182021-06-29 15:50:08 +0100482
483 // If a shape signature exists we will use that to infer dynamic tensors
484 if (shapeSignatureSize != 0)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100485 {
Finn Williamsb49ed182021-06-29 15:50:08 +0100486 // If the shape is incompatible with the shape signature override the shape
487 if (shapeSignatureSize != shape.size())
488 {
489 safeShape = {};
490
491 for (unsigned int i = 0; i < shapeSignatureSize; ++i)
492 {
493 unsigned int dim = tensorPtr->shape_signature[i] > -1 ?
494 static_cast<unsigned int>(tensorPtr->shape_signature[i]) : 0;
495 safeShape.push_back(dim);
496 }
497 }
498
Rob Hughesd812a312021-08-06 13:10:53 +0100499 std::unique_ptr<bool[]> dimMask = std::make_unique<bool[]>(tensorPtr->shape_signature.size());
Mike Kelly04d82292023-01-19 18:29:40 +0000500 bool batchOnly = true;
Finn Williamsb49ed182021-06-29 15:50:08 +0100501 for (unsigned int i = 0; i < tensorPtr->shape_signature.size(); ++i)
502 {
Mike Kelly04d82292023-01-19 18:29:40 +0000503 dimMask[i] = tensorPtr->shape_signature[i] != -1;
504
505 if (i > 0 && !dimMask[i])
506 {
507 batchOnly = false;
508 }
509 }
510 if (batchOnly)
511 {
512 dimMask[0] = true;
Finn Williamsb49ed182021-06-29 15:50:08 +0100513 }
Rob Hughesd812a312021-08-06 13:10:53 +0100514 tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask.get());
Finn Williamsb49ed182021-06-29 15:50:08 +0100515 }
516 // If there is no shape signature treat the tensor as dynamic if the shape has a size of zero
517 else if (shape.size() == 0)
518 {
519 tensorShape = TensorShape(1, false);
520 }
521 else
522 {
523 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(shape.size()), shape.data());
Sadik Armagand109a4d2020-07-28 10:42:13 +0100524 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100525 }
526
Keith Davisd305e1a2020-01-22 11:57:54 +0000527 float quantizationScale = 0.0f;
528 int32_t quantizationOffset = 0;
529
530 if (tensorPtr->quantization.get())
531 {
532 if (tensorPtr->quantization->scale.size() <= 1)
533 {
534 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
535 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
536
537 if (tensorPtr->quantization->scale.size() == 1)
538 {
539 quantizationScale = tensorPtr->quantization->scale[0];
540 }
541 if (tensorPtr->quantization->zero_point.size() == 1)
542 {
543 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000544 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100545 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000546 }
547
Sadik Armagand109a4d2020-07-28 10:42:13 +0100548 armnn::TensorInfo result(tensorShape,
549 type,
550 quantizationScale,
551 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000552 return result;
553 }
554 else
555 {
556 std::vector<float> quantizationScales;
557 std::vector<int32_t> quantizationOffsets;
558
559 // Scale
560 std::copy(tensorPtr->quantization->scale.begin(),
561 tensorPtr->quantization->scale.end(),
562 std::back_inserter(quantizationScales));
563
Keith Davis0c2eeac2020-02-11 16:51:50 +0000564 // QSymmS8 Per-axis
Sadik Armagand109a4d2020-07-28 10:42:13 +0100565 armnn::TensorInfo result(tensorShape,
566 type,
567 quantizationScales,
Jan Eilers7612bd62021-04-06 17:29:03 +0100568 armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
Keith Davisd305e1a2020-01-22 11:57:54 +0000569 return result;
570 }
571 }
572 else
573 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100574 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000575 type,
576 quantizationScale,
577 quantizationOffset);
578 return result;
579 }
telsoa01c577f2c2018-08-31 09:22:23 +0100580}
581
Kevin May7d96b162021-02-03 17:38:41 +0000582armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Mike Kelly377fb212023-01-10 15:55:28 +0000583 const bool outputTensor = false)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100584{
Mike Kelly0d77ae12022-01-07 17:42:27 +0000585 auto const& dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100586 return ToTensorInfo(tensorPtr, dimensions, outputTensor);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100587}
588
telsoa01c577f2c2018-08-31 09:22:23 +0100589template<typename T>
590std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
Kevin May7d96b162021-02-03 17:38:41 +0000591CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
592 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000593 armnn::TensorInfo& tensorInfo,
594 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100595{
Jan Eilers8eb25602020-03-09 12:13:48 +0000596 IgnoreUnused(tensorPtr);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100597 ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
598 ARMNN_ASSERT_MSG(bufferPtr != nullptr,
James Ward58dec6b2020-09-11 17:32:44 +0100599 fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
telsoa01c577f2c2018-08-31 09:22:23 +0100600
601 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000602
603 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
604 {
605 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000606 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
607 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000608 }
609 else
610 {
611 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
612 }
613
Matthew Sloyan81beae32021-07-13 19:46:11 +0100614 // Make sure isConstant flag is set.
615 tensorInfo.SetConstant();
616
telsoa01c577f2c2018-08-31 09:22:23 +0100617 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
618}
619
telsoa01c577f2c2018-08-31 09:22:23 +0100620armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
621{
622 // generate the binding id by shifting the tensor id by 8 bit
623 // and add the subgraph id, which allows 256 subgraphs
624 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
625}
626
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000627bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
628{
629 const unsigned int actualSize = actual.GetNumDimensions();
630 if (actualSize != expected.size())
631 {
632 return false;
633 }
634
635 for (unsigned int i = 0u; i < actualSize; i++)
636 {
637 if (expected[i] < 0 ||
638 actual[i] != static_cast<unsigned int>(expected[i]))
639 {
640 return false;
641 }
642 }
643
644 return true;
645}
646
Cathal Corbett2b922e22022-09-23 15:49:24 +0100647bool CheckShape(const armnn::TensorShape& actual, const armnn::TensorShape& expected)
648{
649 std::vector<int32_t> expectedVec;
650 for (uint32_t i = 0; i < expected.GetNumDimensions(); i++)
651 {
652 expectedVec.push_back(expected[i]);
653 }
654 return CheckShape(actual, expectedVec);
655}
656
// Verifies that two tensors agree on quantization: same quantized data type
// and same quantization space (offset + scale). Tensors that are not
// quantized are ignored. Throws InvalidArgumentException on mismatch, with
// 'descName'/'firstName'/'secondName' used to build the error message.
void CheckMatchingQuantization(const TensorInfo& first,
                               const TensorInfo& second,
                               const std::string& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (!first.IsQuantized() ||
        !second.IsQuantized())
    {
        // Not a quantized type, ignore the validation
        return;
    }

    DataType firstDataType = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    // IsTypeSpaceMatch compares the quantization parameters (offset and scale).
    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
                                       secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(second.GetQuantizationScale()));
    }
}
691
Mike Kelly377fb212023-01-10 15:55:28 +0000692bool IsDynamic(TfLiteParserImpl::TensorRawPtr tensorPtr)
693{
694 auto shape = tensorPtr->shape;
695
696 if (shape.empty())
697 {
698 return true;
699 }
700 auto shapeSig = tensorPtr->shape_signature;
701
702 if (shapeSig.empty())
703 {
704 return false;
705 }
706
707 for (unsigned int i = 0; i < shapeSig.size() ; ++i)
708 {
709 if (shapeSig[i] == -1)
710 {
711 return true;
712 }
713 }
714 return false;
715}
716
telsoa01c577f2c2018-08-31 09:22:23 +0100717} // <anonymous>
718
Kevin May7d96b162021-02-03 17:38:41 +0000719TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100720: m_Options(options)
721, m_Network(nullptr, nullptr)
Kevin May7d96b162021-02-03 17:38:41 +0000722, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
telsoa01c577f2c2018-08-31 09:22:23 +0100723{
724 // register supported operators
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100725 m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
Kevin May7d96b162021-02-03 17:38:41 +0000726 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100727 m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
728 m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
Kevin May7d96b162021-02-03 17:38:41 +0000729 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
730 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
Samuel Yapfd3ba5a2022-08-24 17:04:34 +0100731 m_ParserFunctions[tflite::BuiltinOperator_BATCH_MATMUL] = &TfLiteParserImpl::ParseBatchMatMul;
mathad01b392e982021-04-07 12:07:30 +0100732 m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
Kevin May7d96b162021-02-03 17:38:41 +0000733 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
734 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
Matthew Sloyan4d217c02021-10-07 11:48:58 +0100735 // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
Cathal Corbett80b4ef02022-05-25 11:21:11 +0100736 #if defined(ARMNN_POST_TFLITE_2_4)
Matthew Sloyaneb5f8102021-10-05 17:31:42 +0100737 m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
Matthew Sloyan4d217c02021-10-07 11:48:58 +0100738 #endif
Kevin May7d96b162021-02-03 17:38:41 +0000739 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
740 m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
741 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
742 m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100743 m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
Kevin May7d96b162021-02-03 17:38:41 +0000744 m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300745 m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000746 m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
Teresa Charlin3ab85482021-06-08 16:59:29 +0100747 m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
Teresa Charlincdbd40b2022-02-25 13:21:55 +0000748 m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV] = &TfLiteParserImpl::ParseFloorDiv;
Kevin May7d96b162021-02-03 17:38:41 +0000749 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
750 m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
Teresa Charlin91a53ea2022-04-25 15:47:29 +0100751 m_ParserFunctions[tflite::BuiltinOperator_GATHER_ND] = &TfLiteParserImpl::ParseGatherNd;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300752 m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
753 m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000754 m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
755 m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300756 m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
757 m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
Mike Kelly31dce2b2021-09-01 21:22:37 +0100758 m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
759 = &TfLiteParserImpl::ParseLocalResponseNormalization;
Teresa Charlin28aa6692022-07-12 11:18:44 +0100760 m_ParserFunctions[tflite::BuiltinOperator_LOG] = &TfLiteParserImpl::ParseLog;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100761 m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
Kevin May7d96b162021-02-03 17:38:41 +0000762 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
Teresa Charlinfd33a692022-06-29 15:35:57 +0100763 m_ParserFunctions[tflite::BuiltinOperator_LOG_SOFTMAX] = &TfLiteParserImpl::ParseLogSoftmax;
Kevin May7d96b162021-02-03 17:38:41 +0000764 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
765 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
766 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
767 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
768 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +0100769 m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
Kevin May7d96b162021-02-03 17:38:41 +0000770 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
771 m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300772 m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000773 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
774 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
Mike Kelly0d77ae12022-01-07 17:42:27 +0000775 m_ParserFunctions[tflite::BuiltinOperator_PADV2] = &TfLiteParserImpl::ParsePad;
Narumol Prangnawaratbfaee6b2021-05-24 18:50:24 +0100776 m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
Kevin May7d96b162021-02-03 17:38:41 +0000777 m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
778 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
779 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
Sadik Armagana2747482021-02-09 10:28:54 +0000780 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
781 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
Teresa Charlin4e3e8312021-08-05 12:34:37 +0100782 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
Kevin May7d96b162021-02-03 17:38:41 +0000783 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
784 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
785 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100786 m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
Teresa Charlinf0fce5b2022-05-04 17:24:43 +0100787 m_ParserFunctions[tflite::BuiltinOperator_SQRT] = &TfLiteParserImpl::ParseSqrt;
Keith Davis0176fd82021-06-01 17:36:32 +0100788 m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
Teresa Charlin28aa6692022-07-12 11:18:44 +0100789 m_ParserFunctions[tflite::BuiltinOperator_SIN] = &TfLiteParserImpl::ParseSin;
Kevin May7d96b162021-02-03 17:38:41 +0000790 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
791 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
792 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
793 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
794 m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
795 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
796 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
797 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
798 m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
799 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
800 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
801 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
Mike Kelly5880b912022-01-28 16:18:54 +0000802 m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
803 = &TfLiteParserImpl::ParseUnidirectionalSequenceLSTM;
Kevin May7d96b162021-02-03 17:38:41 +0000804 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100805
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100806 // register supported custom operators
Kevin May7d96b162021-02-03 17:38:41 +0000807 m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
telsoa01c577f2c2018-08-31 09:22:23 +0100808}
809
Mike Kelly377fb212023-01-10 15:55:28 +0000810armnn::TensorInfo TfLiteParserImpl::InputTensorInfo(size_t subgraphIndex,
811 size_t operatorIndex,
812 int input)
813{
814 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
815 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
816
817 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[input]);
818 auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);
819
820 if (search != m_TensorInfos.end())
821 {
822 return m_TensorInfos[inputId];
823 }
824 else
825 {
826 auto tensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
827 m_TensorInfos.insert({ inputId, tensorInfo });
828 return tensorInfo;
829 }
830}
831
832armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromInputs(size_t subgraphIndex,
833 size_t operatorIndex,
834 armnn::IConnectableLayer* layer,
835 int output,
836 std::vector<int> inputs)
837{
838 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
839 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
840
841 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
842
843 auto outputSearch = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(outputId);
844
845 if (outputSearch != m_TensorInfos.end())
846 {
847 return m_TensorInfos[outputId];
848 }
849
850 const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
851 TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);
852
853 if (IsDynamic(outputTensorPtr))
854 {
855 if (inputs.empty())
856 {
857 for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
858 {
859 inputs.emplace_back(i);
860 }
861 }
862 auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
863 std::vector<armnn::TensorShape> inputShapes;
864
865 for (unsigned int i = 0; i < inputs.size(); ++i)
866 {
867 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[inputs[i]]);
868 auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);
869
870 if (search != m_TensorInfos.end())
871 {
872 auto &inputTensorInfo = m_TensorInfos[inputId];
873 inputShapes.push_back(inputTensorInfo.GetShape());
874 }
875 else
876 {
877 m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
878 auto inputTensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
879 m_TensorInfos.insert({ inputId, inputTensorInfo});
880 inputShapes.push_back(inputTensorInfo.GetShape());
881 }
882 }
883 const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
884 tensor.SetShape(outputShape);
885 }
886 m_TensorInfos.insert({ outputId, tensor});
887 return tensor;
888}
889
890armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromShapes(size_t subgraphIndex,
891 size_t operatorIndex,
892 armnn::IConnectableLayer* layer,
893 int output,
894 std::vector<armnn::TensorShape> inputShapes)
895{
896 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
897 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
898
899 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
900 const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
901 TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);
902
903 if (IsDynamic(outputTensorPtr))
904 {
905 const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
906 tensor.SetShape(outputShape);
907 }
908 m_TensorInfos.insert({ outputId, tensor});
909 return tensor;
910}
911
Kevin May7d96b162021-02-03 17:38:41 +0000912void TfLiteParserImpl::ResetParser()
telsoa01c577f2c2018-08-31 09:22:23 +0100913{
914 m_Network = armnn::INetworkPtr(nullptr, nullptr);
915 m_Model = nullptr;
916 m_SubgraphConnections.clear();
Mike Kelly377fb212023-01-10 15:55:28 +0000917 m_OverriddenOutputShapes.clear();
Mike Kelly5880b912022-01-28 16:18:54 +0000918 m_ConstantsToDequantize.clear();
919 m_ConstantsToBeCreated.clear();
Mike Kelly377fb212023-01-10 15:55:28 +0000920 m_TensorInfos.clear();
telsoa01c577f2c2018-08-31 09:22:23 +0100921}
922
Kevin May7d96b162021-02-03 17:38:41 +0000923INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
telsoa01c577f2c2018-08-31 09:22:23 +0100924{
925 ResetParser();
926 m_Model = LoadModelFromFile(graphFile);
927 return CreateNetworkFromModel();
928}
929
Mike Kelly0d77ae12022-01-07 17:42:27 +0000930INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
telsoa01c577f2c2018-08-31 09:22:23 +0100931{
932 ResetParser();
933 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
934 return CreateNetworkFromModel();
935}
936
Finn Williamsb49ed182021-06-29 15:50:08 +0100937
938armnn::INetworkPtr TfLiteParserImpl::LoadModel(std::unique_ptr<tflite::ModelT> model)
939{
940 ResetParser();
941 m_Model = std::move(model);
942
943 return CreateNetworkFromModel();
944}
945
/// Drives the whole parse: walks every operator of the (single) subgraph of
/// m_Model, dispatching each to its registered parser function, then wires up
/// the recorded tensor connections and returns the finished network.
/// @throws ParseException on an out-of-range opcode, more than one subgraph,
///         or any error raised by an individual operator parser.
INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
{

    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
    // Translate parser options into backend options for the network.
    if (m_Options)
    {
        if (m_Options.value().m_InferAndValidate)
        {
            BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                      {
                                                              { "InferAndValidate", true }
                                                      });

            networkOptions.push_back(shapeInferenceMethodOption);
        }
        if (m_Options.value().m_AllowExpandedDims)
        {
            BackendOptions shapeInferenceMethodOption("AllowExpandedDims",
                                                      {
                                                              { "AllowExpandedDims", true }
                                                      });

            networkOptions.push_back(shapeInferenceMethodOption);
        }
    }
    m_Network = INetwork::Create(networkOptions);
    ARMNN_ASSERT(m_Model.get() != nullptr);

    // Multi-subgraph models are not supported by this parser.
    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
                        m_Model->subgraphs.size(),
                        CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    size_t operatorIndex = 0;
    try
    {
        for (SubgraphPtr const& subgraph : m_Model->subgraphs)
        {
            // TensorInfos for graph inputs and constants must exist before any
            // operator parser runs, since parsers look them up.
            SetupInputLayerTensorInfos(subgraphIndex);
            SetupConstantLayerTensorInfos(subgraphIndex);

            m_SubgraphConnections.emplace_back(subgraph->tensors.size());
            for (OperatorPtr const& op : subgraph->operators)
            {
                auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];

// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
#if defined(ARMNN_POST_TFLITE_2_3)
                auto builtinCode = std::max(opCodePtr->builtin_code,
                        static_cast<tflite::BuiltinOperator>(opCodePtr->deprecated_builtin_code));
#else
                auto builtinCode = opCodePtr->builtin_code;
#endif

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
                                                     "subgraph:{} operator idx:{}. {}",
                                                     builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
                                                     operatorIndex, CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto& parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
                ++operatorIndex;
            }

            // Input/output/constant layers are created after all operators so
            // that the tensor-slot bookkeeping is complete.
            SetupInputLayers(subgraphIndex);
            SetupOutputLayers(subgraphIndex);
            SetupConstantLayers(subgraphIndex);

            ++subgraphIndex;
            operatorIndex = 0;
        }
    }
    catch (const ParseException& e)
    {
        // Re-throw with the failing operator/subgraph indices for diagnosis.
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }
    return std::move(m_Network);
}
1057
Mike Kelly0506ef02023-01-03 16:29:44 +00001058bool TfLiteParserImpl::ShouldConstantTensorBeConverted(TfLiteParserImpl::TensorRawPtr tensorPtr,
1059 armnn::DataType inputDataType,
1060 armnn::DataType tensorDataType)
Mike Kelly5880b912022-01-28 16:18:54 +00001061{
Mike Kelly0506ef02023-01-03 16:29:44 +00001062 return (TfLiteParserImpl::IsConstTensor(tensorPtr) && inputDataType == DataType::Float32 &&
1063 (tensorDataType == DataType::QAsymmU8 ||
1064 tensorDataType == DataType::QAsymmS8 ||
1065 tensorDataType == DataType::QSymmS8 ||
1066 tensorDataType == DataType::Signed32 ||
1067 tensorDataType == DataType::Signed64));
Mike Kelly5880b912022-01-28 16:18:54 +00001068}
1069
Kevin May7d96b162021-02-03 17:38:41 +00001070void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
1071 size_t tensorIndex,
1072 armnn::IOutputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +01001073{
1074 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001075 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
1076 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001077
1078 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
1079
Nikhil Rajd4d1c312022-08-03 18:20:59 +01001080 if (slot->GetOwningIConnectableLayer().GetType() != LayerType::Constant)
telsoa01c577f2c2018-08-31 09:22:23 +01001081 {
telsoa01c577f2c2018-08-31 09:22:23 +01001082
Nikhil Rajd4d1c312022-08-03 18:20:59 +01001083 // assuming there is only one producer for that tensor
1084 if (tensorSlots.outputSlot != nullptr)
1085 {
1086 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
1087 "subgraph:{} tensor:{} {}",
1088 subgraphIndex,
1089 tensorIndex,
1090 CHECK_LOCATION().AsString()));
1091 }
1092 }
telsoa01c577f2c2018-08-31 09:22:23 +01001093 tensorSlots.outputSlot = slot;
1094}
1095
Kevin May7d96b162021-02-03 17:38:41 +00001096void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
1097 size_t tensorIndex,
1098 armnn::IInputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +01001099{
1100 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001101 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
1102 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001103
Finn Williamsd4fa5452021-03-01 12:31:41 +00001104 TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01001105 tensorSlots.inputSlots.push_back(slot);
1106}
1107
Kevin May7d96b162021-02-03 17:38:41 +00001108void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001109{
1110 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1111
1112 // NOTE: By default we presume the custom operator is not supported
Kevin May7d96b162021-02-03 17:38:41 +00001113 auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001114
1115 // Identify custom code defined for custom operator
1116 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1117 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
1118
Mike Kelly377fb212023-01-10 15:55:28 +00001119 // Find parser function that corresponds to custom code (if any)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001120 auto iterator = m_CustomParserFunctions.find(customCode);
1121 if (iterator != m_CustomParserFunctions.end())
1122 {
1123 customParserFunction = iterator->second;
1124 }
1125
1126 // Run parser function
1127 (this->*customParserFunction)(subgraphIndex, operatorIndex);
1128}
1129
Kevin May7d96b162021-02-03 17:38:41 +00001130void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001131{
1132 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001133
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001134 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1135
1136 auto opcodeIndex = operatorPtr->opcode_index;
Jim Flynnfca233e2021-09-23 12:16:53 +01001137
1138// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
Matthew Sloyan4d217c02021-10-07 11:48:58 +01001139#if defined(ARMNN_POST_TFLITE_2_3)
Jim Flynnfca233e2021-09-23 12:16:53 +01001140 auto opcode = std::max(m_Model->operator_codes[opcodeIndex]->builtin_code,
1141 static_cast<tflite::BuiltinOperator>(m_Model->operator_codes[opcodeIndex]->deprecated_builtin_code));
1142#else
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001143 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
Jim Flynnfca233e2021-09-23 12:16:53 +01001144#endif
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001145
1146 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
1147 {
1148 // Do not add StandInLayer, throw ParseException instead
1149 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001150 fmt::format("Operator not supported. "
1151 "subgraph:{} operator:{} "
1152 "opcode_index:{} opcode:{} / {} {}",
1153 subgraphIndex,
1154 operatorIndex,
1155 opcodeIndex,
1156 opcode,
1157 tflite::EnumNameBuiltinOperator(opcode),
1158 CHECK_LOCATION().AsString()));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001159 }
1160
1161 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1162 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1163
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001164 const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
1165 const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001166
1167 StandInDescriptor descriptor(numInputs, numOutputs);
James Ward58dec6b2020-09-11 17:32:44 +01001168 auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001169
1170 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
1171 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001172 ARMNN_ASSERT(layer != nullptr);
1173
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001174 for (unsigned int i = 0u; i < numOutputs; ++i)
1175 {
Mike Kelly04d82292023-01-19 18:29:40 +00001176 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[0], true));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001177 }
1178
1179 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1180 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1181
1182 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
1183 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +01001184}
1185
mathad01b392e982021-04-07 12:07:30 +01001186void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
1187{
1188 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1189
1190 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1191 CHECK_VALID_SIZE(inputs.size(), 1);
1192 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1193 CHECK_VALID_SIZE(outputs.size(), 1);
1194
1195 auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
1196
1197 IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
1198 ARMNN_ASSERT(layer != nullptr);
1199
Mike Kelly377fb212023-01-10 15:55:28 +00001200 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
mathad01b392e982021-04-07 12:07:30 +01001201 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1202
1203 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1204 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1205
1206 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1207 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1208}
1209
Kevin May7d96b162021-02-03 17:38:41 +00001210void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001211{
1212 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1213
Mike Kelly0d77ae12022-01-07 17:42:27 +00001214 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1215 const auto* options = operatorPtr->builtin_options.AsConv2DOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01001216
1217 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1218
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001219 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1220 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1221 CHECK_VALID_SIZE(outputs.size(), 1);
1222
telsoa01c577f2c2018-08-31 09:22:23 +01001223 Convolution2dDescriptor desc;
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001224 inputs.size() == 3 ?
1225 desc.m_BiasEnabled = true : desc.m_BiasEnabled = false;
telsoa01c577f2c2018-08-31 09:22:23 +01001226 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1227 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +00001228 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +01001229 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1230 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +00001231
Mike Kelly377fb212023-01-10 15:55:28 +00001232 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1233 armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001234
1235 // assuming input is NHWC
1236 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001237 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001238
1239 // assuming the filter is OHWI : Output, H, W, Input
1240 // which is essentially the same as NHWC
1241 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001242 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001243
Pablo Tellof0bd6832019-04-26 17:58:13 +01001244 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1245 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1246 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1247 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +01001248
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001249 // Add the first input and weights tensor to the registration list.
1250 // The constant weights will be added by SetupConstantLayers.
1251 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1252 std::vector<unsigned int> tensorIndexesToRegister = { inputTensorIndexes[0], inputTensorIndexes[1] };
telsoa01c577f2c2018-08-31 09:22:23 +01001253
James Ward58dec6b2020-09-11 17:32:44 +01001254 auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001255 armnn::IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, layerName.c_str());
telsoa01c577f2c2018-08-31 09:22:23 +01001256
Mike Kelly0506ef02023-01-03 16:29:44 +00001257 if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
telsoa01c577f2c2018-08-31 09:22:23 +01001258 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001259 m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
telsoa01c577f2c2018-08-31 09:22:23 +01001260 }
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001261
1262 if (desc.m_BiasEnabled)
telsoa01c577f2c2018-08-31 09:22:23 +01001263 {
Mike Kelly377fb212023-01-10 15:55:28 +00001264 armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001265
1266 // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
1267 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
1268
Mike Kelly0506ef02023-01-03 16:29:44 +00001269 if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001270 {
1271 m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
1272 }
telsoa01c577f2c2018-08-31 09:22:23 +01001273 }
1274
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001275 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001276
Mike Kelly377fb212023-01-10 15:55:28 +00001277 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
jimfly01c25411c2018-11-14 17:47:22 +00001278 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001279
1280 // register the input connection slots for the layer, connections are made after all layers have been created
1281 // only the tensors for the inputs are relevant, exclude the const tensors
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001282 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
telsoa01c577f2c2018-08-31 09:22:23 +01001283
jimfly01c25411c2018-11-14 17:47:22 +00001284 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +01001285 // register the output connection slots for the layer, connections are made after all layers have been created
1286 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001287 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, { outputTensorIndexes[0] });
telsoa01c577f2c2018-08-31 09:22:23 +01001288}
1289
// Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
#if defined(ARMNN_POST_TFLITE_2_4)
// Parses a TFLite CONV_3D operator: builds a Convolution3dDescriptor from the
// operator options, computes SAME/VALID padding from the input/filter shapes,
// registers the weight (and optional bias) tensors for constant-layer creation,
// and adds the Convolution3d layer plus any fused activation to the network.
void TfLiteParserImpl::ParseConv3D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsConv3DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution3dDescriptor desc;
    desc.m_BiasEnabled = false;              // enabled below only if a third (bias) input is present
    desc.m_DataLayout = armnn::DataLayout::NDHWC;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_StrideZ = CHECKED_NON_NEGATIVE(options->stride_d);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
    desc.m_DilationZ = CHECKED_NON_NEGATIVE(options->dilation_d_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);   // input + weights, optionally + bias

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Assuming input is NDHWC
    unsigned int inputDepth = inputTensorInfo.GetShape()[1];
    unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    unsigned int inputWidth = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
    unsigned int filterDepth = filterTensorInfo.GetShape()[0];
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Derive explicit padding for each spatial axis from the TFLite padding mode.
    CalcPadding(inputDepth, filterDepth, desc.m_StrideZ,
                desc.m_DilationZ, desc.m_PadFront, desc.m_PadBack, options->padding);
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // NOTE(review): the returned value is not used here — presumably the call is kept for
    // its validation/side effects inside CreateConstTensorNonPermuted; confirm before removing.
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());

    auto layerName = fmt::format("Conv3D:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input and weights tensor to the registration list.
    // The constant weights will be added by SetupConstantLayers.
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;

        // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
    }

    armnn::IConnectableLayer* layer = m_Network->AddConvolution3dLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register the input connection slots for the layer, connections are made after all layers have been created
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);

    // A fused activation (if any) is appended as a separate layer; its output becomes the operator's output.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
#endif
Matthew Sloyaneb5f8102021-10-05 17:31:42 +01001369
Kevin May7d96b162021-02-03 17:38:41 +00001370void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001371{
1372 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1373
Mike Kelly0d77ae12022-01-07 17:42:27 +00001374 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1375 const auto* options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01001376
1377 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1378
1379 DepthwiseConvolution2dDescriptor desc;
telsoa01c577f2c2018-08-31 09:22:23 +01001380 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1381 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +00001382 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matthew Jacksond6a9dee2019-07-22 13:53:24 +01001383 CHECKED_NON_NEGATIVE(options->depth_multiplier);
telsoa01c577f2c2018-08-31 09:22:23 +01001384
1385 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1386 CHECK_VALID_SIZE(inputs.size(), 2, 3);
Cathal Corbett06902652022-04-14 17:55:11 +01001387 if (inputs.size() == 3)
1388 {
1389 desc.m_BiasEnabled = true;
1390 }
1391
telsoa01c577f2c2018-08-31 09:22:23 +01001392 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1393 CHECK_VALID_SIZE(outputs.size(), 1);
Pablo Tellof0bd6832019-04-26 17:58:13 +01001394 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1395 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +00001396
Mike Kelly377fb212023-01-10 15:55:28 +00001397 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1398 armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001399
Matteo Martincigh747ef822018-12-18 09:26:39 +00001400 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +01001401 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1402 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +00001403
1404 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +01001405 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1406 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1407
Pablo Tellof0bd6832019-04-26 17:58:13 +01001408 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1409 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1410 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1411 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +01001412
Jan Eilers53ef7952021-06-02 12:01:25 +01001413 // ArmNN uses the same filter tensor layout at TfLite [1, H, W, O] no need for any permutation
James Ward58dec6b2020-09-11 17:32:44 +01001414 auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001415
Cathal Corbett06902652022-04-14 17:55:11 +01001416 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1417 // Add the first input and weights tensor to the registration list.
1418 // The constant weights will be added by SetupConstantLayers.
1419 std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};
1420
1421 armnn::IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, layerName.c_str());
1422
1423 if (desc.m_BiasEnabled)
telsoa01c577f2c2018-08-31 09:22:23 +01001424 {
1425 desc.m_BiasEnabled = true;
Mike Kelly377fb212023-01-10 15:55:28 +00001426 TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Cathal Corbett06902652022-04-14 17:55:11 +01001427
1428 // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
1429 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
telsoa01c577f2c2018-08-31 09:22:23 +01001430 }
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001431 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001432
Mike Kelly377fb212023-01-10 15:55:28 +00001433 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
jimfly01c25411c2018-11-14 17:47:22 +00001434 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001435
1436 // register the input connection slots for the layer, connections are made after all layers have been created
1437 // only the tensors for the inputs are relevant, exclude the const tensors
Cathal Corbett06902652022-04-14 17:55:11 +01001438 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
telsoa01c577f2c2018-08-31 09:22:23 +01001439
jimfly01c25411c2018-11-14 17:47:22 +00001440 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +01001441 // register the output connection slots for the layer, connections are made after all layers have been created
1442 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1443 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1444}
1445
Kevin May7d96b162021-02-03 17:38:41 +00001446void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsed66d142019-12-06 09:55:55 +00001447{
1448 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1449
1450 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1451 CHECK_VALID_SIZE(inputs.size(), 1);
1452
1453 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1454 CHECK_VALID_SIZE(outputs.size(), 1);
1455
James Ward58dec6b2020-09-11 17:32:44 +01001456 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +00001457
1458 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001459 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +00001460
Mike Kelly377fb212023-01-10 15:55:28 +00001461 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Finn Williamsed66d142019-12-06 09:55:55 +00001462 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1463
1464 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1465 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1466
1467 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1468 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1469}
1470
Teresa Charlin3ab85482021-06-08 16:59:29 +01001471void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorIndex)
1472{
1473 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1474
1475 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1476 CHECK_VALID_SIZE(inputs.size(), 2);
1477
1478 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1479 CHECK_VALID_SIZE(outputs.size(), 1);
1480
1481 auto layerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
1482
Mike Kelly377fb212023-01-10 15:55:28 +00001483 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001484 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1485
1486 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1487
1488 ReshapeDescriptor reshapeDesc;
Finn Williamsb49ed182021-06-29 15:50:08 +01001489
1490 if (outputTensorInfo.GetShape().AreAllDimensionsSpecified())
1491 {
1492 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1493 }
1494 else
1495 {
1496 int32_t axis = inputs[1]->shape[0];
1497
1498 int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
1499
1500 if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
1501 {
1502 throw ParseException("axis must be in range [0 - (inputDimSize + 1), inputDimSize] inclusive");
1503 }
1504
1505 if(axis < 0)
1506 {
1507 axis = inputDimSize + axis + 1;
1508 }
1509
Rob Hughesd812a312021-08-06 13:10:53 +01001510 std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
Finn Williamsb49ed182021-06-29 15:50:08 +01001511 unsigned int inputShapeIndex = 0;
1512 for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
1513 {
1514 if (i == static_cast<unsigned int>(axis))
1515 {
1516 shape[i] = 1;
1517 }
1518 else
1519 {
1520 shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
1521 ++inputShapeIndex;
1522 }
1523 }
1524
Rob Hughesd812a312021-08-06 13:10:53 +01001525 reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data());
Finn Williamsb49ed182021-06-29 15:50:08 +01001526 }
Teresa Charlin3ab85482021-06-08 16:59:29 +01001527
1528 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1529 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001530
1531 reshapeDesc.m_TargetShape = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}).GetShape();
1532 outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
1533
Teresa Charlin3ab85482021-06-08 16:59:29 +01001534 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1535
1536 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1537 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1538
1539 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1540 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1541}
1542
Kevin May7d96b162021-02-03 17:38:41 +00001543void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
Keith Davis4cd29a02019-09-09 14:49:20 +01001544{
1545 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1546
1547 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001548 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001549
1550 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1551 CHECK_VALID_SIZE(outputs.size(), 1);
1552
James Ward58dec6b2020-09-11 17:32:44 +01001553 auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly08759e22020-03-02 11:41:31 +00001554 TransposeDescriptor desc;
Keith Davis4cd29a02019-09-09 14:49:20 +01001555
josh minorba424d22019-11-13 10:55:17 -06001556 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001557 {
Mike Kelly377fb212023-01-10 15:55:28 +00001558 armnn::TensorInfo permuteTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Kevin May85d92602019-09-27 17:21:06 +01001559 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001560 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1561 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001562 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
Mike Kelly08759e22020-03-02 11:41:31 +00001563 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
Kevin May85d92602019-09-27 17:21:06 +01001564
Mike Kelly08759e22020-03-02 11:41:31 +00001565 desc = TransposeDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001566 }
Mike Kelly377fb212023-01-10 15:55:28 +00001567 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Keith Davis4cd29a02019-09-09 14:49:20 +01001568
James Conroy05102392020-06-24 15:39:55 +01001569 IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001570 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001571
1572 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1573 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Keith Davis4cd29a02019-09-09 14:49:20 +01001574 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1575
1576 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1577 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1578
1579 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1580 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1581}
1582
// Parses a TFLite TRANSPOSE_CONV operator. Inputs are:
//   0: output-shape tensor, 1: filter weights, 2: the actual data input,
//   3: optional bias. The weights (and bias) are baked into the layer here
// rather than registered via SetupConstantLayers.
void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    // A fourth input tensor, when present, holds the bias.
    if (inputs.size() == 4)
    {
        desc.m_BiasEnabled = true;
    }
    else
    {
        CHECK_VALID_SIZE(inputs.size(), 3);
    }

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // This block determines the output shape of the transpose convolution. If the output shape tensor ptr is not null
    // And the tensor is a constant, we can access the data at load time and set the output shape of the
    // layer. If this is not constant, We do not have access to the shape data, so we have to use
    // infer output shape and skip this code block.
    if (inputs[0] && IsConstTensor(inputs[0]))
    {
        armnn::TensorInfo tensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
        std::vector<int> output_shape(tensorInfo.GetNumElements());

        if (tensorInfo.GetDataType() == DataType::Signed32)
        {
            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
        }
        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
        {
            // QAsymmU8 data is widened element-by-element into int.
            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
            {
                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
            }
        }
        // NOTE(review): for any other data type, output_shape stays zero-initialised yet
        // m_OutputShapeEnabled is still set below — confirm whether that is intended.
        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : output_shape)
        {
            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        desc.m_OutputShapeEnabled = true;
    }
    // Note: the data input is index 2, not 0 (index 0 is the output-shape tensor).
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Derive explicit padding from the TFLite padding mode (dilation fixed to 1).
    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    // Weights are baked into the layer as a const tensor (converted to the input's data type if needed).
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);

    if (desc.m_BiasEnabled)
    {
        auto biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
        auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo, inputTensorInfo.GetDataType());
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasConstTensor.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    // Shape inference uses the data input (2) and the filter (1).
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0 , { 2, 1 });
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1696
// Parses a TFLite AVERAGE_POOL_2D operator by delegating to the shared
// pooling handler with the Average algorithm.
void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1701
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001702void TfLiteParserImpl::ParseBatchMatMul(size_t subgraphIndex, size_t operatorIndex)
1703{
1704 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1705
1706 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1707 CHECK_VALID_SIZE(inputs.size(), 2);
1708
1709 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1710 CHECK_VALID_SIZE(outputs.size(), 1);
1711
1712 auto layerName = fmt::format("BatchMatMul:{}:{}", subgraphIndex, operatorIndex);
1713
Mike Kelly377fb212023-01-10 15:55:28 +00001714 TensorInfo inputXTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1715 TensorInfo inputYTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001716
1717 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1718 const auto* options = operatorPtr->builtin_options.AsBatchMatMulOptions();
1719
Teresa Charlinbc37a6b2022-09-22 10:12:58 +01001720 // Adjoint in tensorflow lite performs transpose operation
1721 BatchMatMulDescriptor descriptor(options->adj_x,
1722 options->adj_y,
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001723 false,
Teresa Charlinbc37a6b2022-09-22 10:12:58 +01001724 false);
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001725 // Arbitrary DataLayout
1726
1727 IConnectableLayer* layer = m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
1728 ARMNN_ASSERT(layer != nullptr);
1729
Mike Kelly377fb212023-01-10 15:55:28 +00001730 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001731 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1732
1733 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1734 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1735
1736 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1737 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1738}
1739
Kevin May7d96b162021-02-03 17:38:41 +00001740void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001741{
1742 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1743
1744 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1745 CHECK_VALID_SIZE(inputs.size(), 3);
1746
1747 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1748 CHECK_VALID_SIZE(outputs.size(), 1);
1749
Mike Kelly377fb212023-01-10 15:55:28 +00001750 armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001751 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1752
Mike Kelly377fb212023-01-10 15:55:28 +00001753 armnn::TensorInfo cropsTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001754 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1755
1756 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1757 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1758
1759 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1760 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1761
1762 size_t step = 2;
1763 std::vector<std::pair<unsigned int, unsigned int>> crops;
1764 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1765 {
1766 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1767 }
1768
1769 armnn::BatchToSpaceNdDescriptor desc;
1770 desc.m_BlockShape = blockShape;
1771 desc.m_Crops = crops;
1772 desc.m_DataLayout = armnn::DataLayout::NHWC;
1773
James Ward58dec6b2020-09-11 17:32:44 +01001774 auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001775
Mike Kelly377fb212023-01-10 15:55:28 +00001776 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
James Conroy05102392020-06-24 15:39:55 +01001777
1778 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1779 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001780
1781 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1782 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001783 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1784
1785 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1786 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1787
1788 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1789 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1790}
1791
Kevin May7d96b162021-02-03 17:38:41 +00001792void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001793{
1794 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1795
1796 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1797 CHECK_VALID_SIZE(inputs.size(), 1);
1798
1799 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1800 CHECK_VALID_SIZE(outputs.size(), 1);
1801
1802 L2NormalizationDescriptor desc;
1803 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001804 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001805 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1806
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001807 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001808
Mike Kelly377fb212023-01-10 15:55:28 +00001809 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Jackson28c94572019-07-18 10:47:03 +01001810 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1811
1812 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1813 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1814
1815 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1816 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1817}
1818
// Parses a TFLite MAX_POOL_2D operator.
// Thin wrapper: all pooling handling is shared with AVERAGE_POOL_2D in ParsePool.
void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1823
Kevin May7d96b162021-02-03 17:38:41 +00001824void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001825{
1826 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1827
1828 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1829 CHECK_VALID_SIZE(inputs.size(), 2);
1830
1831 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1832 CHECK_VALID_SIZE(outputs.size(), 1);
1833
James Ward58dec6b2020-09-11 17:32:44 +01001834 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001835
Mike Kelly377fb212023-01-10 15:55:28 +00001836 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1837 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
James Conroy05102392020-06-24 15:39:55 +01001838 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001839
James Conroy05102392020-06-24 15:39:55 +01001840 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1841 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001842
1843 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
1844 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001845 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1846
1847 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001848 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001849
1850 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1851 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1852}
1853
Kevin May7d96b162021-02-03 17:38:41 +00001854void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001855{
1856 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1857
1858 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1859 CHECK_VALID_SIZE(inputs.size(), 2);
1860
1861 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1862 CHECK_VALID_SIZE(outputs.size(), 1);
1863
James Ward58dec6b2020-09-11 17:32:44 +01001864 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001865
Mike Kelly377fb212023-01-10 15:55:28 +00001866 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1867 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
James Conroy05102392020-06-24 15:39:55 +01001868 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001869
James Conroy05102392020-06-24 15:39:55 +01001870 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1871 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001872
1873 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
1874 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001875 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1876
1877 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001878 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001879
1880 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1881 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1882}
1883
Kevin May7d96b162021-02-03 17:38:41 +00001884void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
1885 size_t operatorIndex,
1886 PoolingAlgorithm algorithm)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001887{
1888 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1889
Mike Kelly0d77ae12022-01-07 17:42:27 +00001890 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1891 const auto* options = operatorPtr->builtin_options.AsPool2DOptions();
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001892
1893 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1894
1895 std::string layerName;
1896
1897 switch (algorithm)
1898 {
1899 case PoolingAlgorithm::Average:
1900 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001901 fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001902 break;
1903 case PoolingAlgorithm::Max:
1904 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001905 fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001906 break;
1907 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001908 ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001909 }
1910
1911 Pooling2dDescriptor desc;
1912
1913 desc.m_PoolType = algorithm;
1914 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1915 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1916 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1917 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1918 desc.m_PaddingMethod = PaddingMethod::Exclude;
1919 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001920 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001921
1922 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1923 CHECK_VALID_SIZE(inputs.size(), 1);
Mike Kelly377fb212023-01-10 15:55:28 +00001924 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001925
1926 // assuming input is NHWC
1927 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1928 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1929
Pablo Tellof0bd6832019-04-26 17:58:13 +01001930 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1931 desc.m_PadTop, desc.m_PadBottom, options->padding);
1932 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1933 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001934
1935 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1936 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001937
James Conroy05102392020-06-24 15:39:55 +01001938 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1939 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001940
1941 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1942 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
jimfly01c25411c2018-11-14 17:47:22 +00001943 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001944
1945 // register the input connection slots for the layer, connections are made after all layers have been created
1946 // only the tensors for the inputs are relevant, exclude the const tensors
1947 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001948 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001949
jimfly01c25411c2018-11-14 17:47:22 +00001950 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001951 // register the output connection slots for the layer, connections are made after all layers have been created
1952 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1953 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1954}
1955
Kevin May7d96b162021-02-03 17:38:41 +00001956void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
josh minorba424d22019-11-13 10:55:17 -06001957{
1958 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1959
1960 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1961 CHECK_VALID_SIZE(inputs.size(), 3);
1962 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1963 CHECK_VALID_SIZE(outputs.size(), 1);
1964
1965 SliceDescriptor desc;
1966
1967 // set begin tensor info for slice descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00001968 armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
josh minorba424d22019-11-13 10:55:17 -06001969 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1970
1971 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1972 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1973
1974 // set size tensor info for slice descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00001975 armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
josh minorba424d22019-11-13 10:55:17 -06001976 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1977
Cathal Corbettde33dda2022-09-20 16:40:09 +01001978 std::vector<int> signedSize(sizeTensorInfo.GetNumElements(), 1);
1979
1980 // if size buffer data is not specified, all contents of size vector remain as values of 1
1981 if (sizeBufferPtr->data.data())
1982 {
1983 ::memcpy(signedSize.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1984 }
1985
josh minorba424d22019-11-13 10:55:17 -06001986 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
Mike Kelly377fb212023-01-10 15:55:28 +00001987 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly7ba84d62021-09-10 15:27:19 +01001988
1989 for (unsigned int i = 0; i < signedSize.size(); ++i)
1990 {
1991 int signedValue = signedSize[i];
Jim Flynnfca233e2021-09-23 12:16:53 +01001992
Mike Kelly7ba84d62021-09-10 15:27:19 +01001993 if (signedValue < -1 || signedValue > static_cast<int>(inputTensorInfo.GetShape()[i] - begin[i]))
1994 {
1995 throw ParseException(fmt::format("Invalid value for size {} size must be in range "
1996 "[-1, inputDimSize - begin] [-1, {}] inclusive {}",
1997 signedValue,
1998 inputTensorInfo.GetShape()[i] - begin[i],
1999 CHECK_LOCATION().AsString()));
2000 }
2001
2002 if (signedValue == -1)
2003 {
2004 size[i] = inputTensorInfo.GetShape()[i] - begin[i];
2005 }
2006 else
2007 {
2008 size[i] = static_cast<unsigned int>(signedValue);
2009 }
2010 }
2011
josh minorba424d22019-11-13 10:55:17 -06002012 desc = SliceDescriptor(begin, size);
2013
James Ward58dec6b2020-09-11 17:32:44 +01002014 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06002015
James Conroy05102392020-06-24 15:39:55 +01002016 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
Mike Kelly377fb212023-01-10 15:55:28 +00002017
2018 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2019 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
josh minorba424d22019-11-13 10:55:17 -06002020 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2021
2022 // register the input connection slots for the layer, connections are made after all layers have been created
2023 // only the tensors for the inputs are relevant, exclude the const tensors
2024 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2025 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2026
2027 // register the output connection slots for the layer, connections are made after all layers have been created
2028 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2029 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2030}
2031
Kevin May7d96b162021-02-03 17:38:41 +00002032void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01002033{
2034 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00002035 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2036 const auto* options = operatorPtr->builtin_options.AsSoftmaxOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01002037
2038 SoftmaxDescriptor desc;
2039 desc.m_Beta = options->beta;
2040
2041 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2042 CHECK_VALID_SIZE(inputs.size(), 1);
2043 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2044 CHECK_VALID_SIZE(outputs.size(), 1);
2045
James Ward58dec6b2020-09-11 17:32:44 +01002046 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01002047 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
2048
Mike Kelly377fb212023-01-10 15:55:28 +00002049 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
telsoa01c577f2c2018-08-31 09:22:23 +01002050 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2051
2052 // register the input connection slots for the layer, connections are made after all layers have been created
2053 // only the tensors for the inputs are relevant, exclude the const tensors
2054 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2055 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2056
2057 // register the output connection slots for the layer, connections are made after all layers have been created
2058 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2059 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2060}
2061
Teresa Charlinfd33a692022-06-29 15:35:57 +01002062void TfLiteParserImpl::ParseLogSoftmax(size_t subgraphIndex, size_t operatorIndex)
2063{
2064 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2065
2066 LogSoftmaxDescriptor desc;
2067
2068 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2069 CHECK_VALID_SIZE(inputs.size(), 1);
2070 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2071 CHECK_VALID_SIZE(outputs.size(), 1);
2072
2073 auto layerName = fmt::format("LogSoftmax:{}:{}", subgraphIndex, operatorIndex);
2074 IConnectableLayer* const layer = m_Network->AddLogSoftmaxLayer(desc, layerName.c_str());
2075
Mike Kelly377fb212023-01-10 15:55:28 +00002076 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Teresa Charlinfd33a692022-06-29 15:35:57 +01002077 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2078
2079 // register the input connection slots for the layer, connections are made after all layers have been created
2080 // only the tensors for the inputs are relevant, exclude the const tensors
2081 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2082 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2083
2084 // register the output connection slots for the layer, connections are made after all layers have been created
2085 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2086 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2087}
2088
// Parses a TFLite SPACE_TO_BATCH_ND operator.
// Inputs: [0] data tensor, [1] block shape (constant), [2] paddings (constant).
void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // NOTE(review): the two constant buffers below are memcpy'd without a null
    // check (unlike ParseSlice's size buffer) — assumes block shape and pad
    // list are always populated constant tensors; confirm against the schema.
    armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    // The flat pad list is [before_0, after_0, before_1, after_1, ...];
    // fold it into (before, after) pairs for the descriptor.
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);

    TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // The output shape is inferred from the input connection; input and output
    // must share quantization parameters.
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2140
Teresa Charlin3ab85482021-06-08 16:59:29 +01002141armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
Mike Kelly0d77ae12022-01-07 17:42:27 +00002142 const armnn::TensorInfo& inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01002143{
Teresa Charlin3ab85482021-06-08 16:59:29 +01002144 CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
telsoa01c577f2c2018-08-31 09:22:23 +01002145 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2146
2147 if (inputTensorInfo.GetNumDimensions() > 4)
2148 {
2149 std::stringstream ss;
2150 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2151 << " shape:" << inputTensorInfo.GetShape() << " "
2152 << CHECK_LOCATION().AsString();
2153 throw ParseException(ss.str());
2154 }
2155
2156 if (squeezeDims.empty())
2157 {
2158 squeezeDims.assign(dimensionSequence,
2159 dimensionSequence+inputTensorInfo.GetNumDimensions());
2160 }
2161
2162 std::vector<uint32_t> outputDims;
2163 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2164 {
2165 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2166 auto currentDimension = inputTensorInfo.GetShape()[i];
2167 if (skipSqueeze || currentDimension != 1)
2168 {
2169 outputDims.push_back(currentDimension);
2170 }
2171 }
2172
2173 if (outputDims.size() > 4)
2174 {
2175 std::stringstream ss;
2176 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2177 << " shape:" << inputTensorInfo.GetShape() << " "
2178 << CHECK_LOCATION().AsString();
2179 throw ParseException(ss.str());
2180 }
2181
2182 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2183 outputDims.data());
2184
2185 // we need to preserve the tensor type and the quantization data as well
2186 TensorInfo outTensorInfo = inputTensorInfo;
2187 outTensorInfo.SetShape(outShape);
2188
2189 return outTensorInfo;
2190}
2191
Keith Davis0176fd82021-06-01 17:36:32 +01002192void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
2193{
2194 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2195
2196 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2197 CHECK_VALID_SIZE(inputs.size(), 1);
2198 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2199 CHECK_VALID_SIZE(outputs.size(), 1);
2200
2201 auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
2202
2203 IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
2204 ARMNN_ASSERT(layer != nullptr);
2205
Mike Kelly377fb212023-01-10 15:55:28 +00002206 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Keith Davis0176fd82021-06-01 17:36:32 +01002207 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2208
2209 // Check if output tensor type is Signed32 or Signed64
2210 if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
2211 outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
2212 {
2213 throw ParseException(
2214 fmt::format(
2215 "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
2216 CHECK_LOCATION().AsString()));
2217 }
2218
2219 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2220 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2221
2222 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2223 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2224}
2225
// Parses a TFLite SQUEEZE operator. Squeeze is implemented as a Reshape layer
// whose target shape is the input shape with the requested size-1 dimensions
// removed (see OutputShapeOfSqueeze).
void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
    auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    std::vector<uint32_t> squeezeDim;
    // A single negative dim index is interpreted as a negative index in python
    // Meaning the index will be the shape size plus the negative index value
    // NOTE(review): only the single-element case resolves negative indices;
    // a multi-element squeeze_dims list containing a negative value would pass
    // through AsUnsignedVector unresolved — confirm the schema forbids that.
    if (options->squeeze_dims.size() == 1 && options->squeeze_dims[0] < 0)
    {
        int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
        squeezeDim.push_back(static_cast<uint32_t>(dim));
    }
    else
    {
        squeezeDim = AsUnsignedVector(options->squeeze_dims);
    }

    armnn::TensorInfo outputTensorInfo = TfLiteParserImpl::OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);

    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    // Record the computed output info before creating the layer — presumably so
    // downstream operators can resolve this tensor's shape from m_TensorInfos;
    // verify against InputTensorInfo's lookup behavior.
    auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
    m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2275
// Parses a TFLite STRIDED_SLICE operator.
// Inputs: [0] data, [1] begin, [2] end, [3] strides — the last three are read
// from constant buffers.
void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsStridedSliceOptions();

    // The TFLite mask semantics are carried over to the descriptor unchanged.
    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // NOTE(review): the three constant buffers below are memcpy'd without null
    // checks — assumes begin/end/strides are always populated constant tensors;
    // confirm against the schema.
    armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // The output shape is inferred from the input connection.
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2332
// Parses a TFLite SUB operator, including any fused activation function.
void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsSubOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // NOTE(review): neither of these infos is read afterwards (unlike
    // ParseMaximum/ParseMinimum, which feed them to CheckMatchingQuantization).
    // The calls are kept in case InputTensorInfo has side effects (e.g.
    // populating a shape cache) — confirm before removing.
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // The output shape is inferred from both input connections.
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Any fused activation is appended after the subtraction layer; output
    // slots are registered against the (possibly new) tail layer.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2364
Kevin May7d96b162021-02-03 17:38:41 +00002365void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302366{
2367 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2368
Mike Kelly0d77ae12022-01-07 17:42:27 +00002369 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2370 const auto* options = operatorPtr->builtin_options.AsDivOptions();
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302371
2372 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2373 CHECK_VALID_SIZE(inputs.size(), 2);
2374
2375 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2376 CHECK_VALID_SIZE(outputs.size(), 1);
2377
Mike Kelly377fb212023-01-10 15:55:28 +00002378 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2379 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302380
James Ward58dec6b2020-09-11 17:32:44 +01002381 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302382 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002383 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302384
Mike Kelly377fb212023-01-10 15:55:28 +00002385 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302386 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2387
2388 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002389 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302390 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2391
2392 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2393 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2394}
2395
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002396void TfLiteParserImpl::ParseFloorDiv(size_t subgraphIndex, size_t operatorIndex)
2397{
2398 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2399
2400 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2401 CHECK_VALID_SIZE(inputs.size(), 2);
2402
2403 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2404 CHECK_VALID_SIZE(outputs.size(), 1);
2405
Mike Kelly377fb212023-01-10 15:55:28 +00002406 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2407 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002408
2409 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
2410 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
2411 ARMNN_ASSERT(layer != nullptr);
2412
Mike Kelly377fb212023-01-10 15:55:28 +00002413 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002414 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2415
2416 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2417 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2418 layer = AddFusedFloorLayer(layer, 0);
2419
2420 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2421 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2422}
2423
Kevin May7d96b162021-02-03 17:38:41 +00002424void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002425{
2426 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2427
Mike Kelly0d77ae12022-01-07 17:42:27 +00002428 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2429 const auto* options = operatorPtr->builtin_options.AsAddOptions();
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002430
2431 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2432 CHECK_VALID_SIZE(inputs.size(), 2);
2433
2434 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2435 CHECK_VALID_SIZE(outputs.size(), 1);
2436
Mike Kelly377fb212023-01-10 15:55:28 +00002437 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2438 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002439
James Ward58dec6b2020-09-11 17:32:44 +01002440 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002441 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002442 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002443
Mike Kelly377fb212023-01-10 15:55:28 +00002444 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002445 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2446
2447 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002448 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002449 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2450
2451 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2452 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2453}
2454
Kevin May7d96b162021-02-03 17:38:41 +00002455void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002456{
2457 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2458
Mike Kelly0d77ae12022-01-07 17:42:27 +00002459 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2460 const auto* options = operatorPtr->builtin_options.AsMulOptions();
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002461
2462 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2463 CHECK_VALID_SIZE(inputs.size(), 2);
2464
2465 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2466 CHECK_VALID_SIZE(outputs.size(), 1);
2467
Mike Kelly377fb212023-01-10 15:55:28 +00002468 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2469 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002470
James Ward58dec6b2020-09-11 17:32:44 +01002471 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002472 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002473 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002474
Mike Kelly377fb212023-01-10 15:55:28 +00002475 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002476 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2477
2478 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002479 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002480 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2481
2482 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2483 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2484}
2485
Kevin May7d96b162021-02-03 17:38:41 +00002486void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002487{
2488 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2489
2490 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2491
2492 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2493 CHECK_VALID_SIZE(outputs.size(), 1);
2494
Mike Kelly377fb212023-01-10 15:55:28 +00002495 armnn::TensorInfo dimTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002496 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2497
2498 armnn::MeanDescriptor desc;
2499 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
2500 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
2501 desc.m_Axis = axis;
2502
Mike Kelly377fb212023-01-10 15:55:28 +00002503 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002504 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002505
2506 desc.m_KeepDims =
Mike Kelly377fb212023-01-10 15:55:28 +00002507 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002508 true : false;
2509
James Ward58dec6b2020-09-11 17:32:44 +01002510 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002511 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002512 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002513
Mike Kelly377fb212023-01-10 15:55:28 +00002514 outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002515 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2516
2517 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2518 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2519
2520 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2521 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2522}
2523
/// Lowers TfLite PAD and PADV2 operators to an ArmNN Pad layer.
/// PAD takes 2 inputs (data, paddings); PADV2 takes 3 (data, paddings,
/// scalar pad value). For quantized inputs with no explicit pad value, the
/// quantization zero point is used as the padding constant.
void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Constant paddings tensor, flattened to (before, after) pairs per dimension.
    std::vector<unsigned int> padBuffer = GetUIntBuffer(padTensorInfo, m_Model, inputs[1]->buffer);

    size_t step = 2; // each dimension contributes a (before, after) pair
    armnn::PadDescriptor desc;
    auto opcode = GetOpCode(m_Model, subgraphIndex, operatorIndex);

    if (opcode == tflite::BuiltinOperator_PAD)
    {
        CHECK_VALID_SIZE(inputs.size(), 2);

        // No explicit pad value in PAD: for quantized tensors pad with the
        // zero point (the quantized representation of 0).
        if (inputTensorInfo.IsQuantized())
        {
            desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
        }
    }
    else if (opcode == tflite::BuiltinOperator_PADV2)
    {
        CHECK_VALID_SIZE(inputs.size(), 3);

        armnn::TensorInfo padValueTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        // Only a scalar pad value is supported.
        if (padValueTensorInfo.GetNumElements() != 1)
        {
            ARMNN_THROW_PARSE_EXCEPTION("Multiple padding values are not supported in PADV2");
        }
        BufferRawPtr padValueBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

        // Get the pad value from the input tensor, dequantizing if needed so
        // the descriptor always carries a float value.
        if (padValueBufferPtr->data.size() > 0)
        {
            switch (padValueTensorInfo.GetDataType())
            {
                case armnn::DataType::Float32:
                {
                    std::vector<float> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = padValueBuffer[0];
                    break;
                }
                case armnn::DataType::QAsymmU8:
                {
                    std::vector<uint8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = armnn::Dequantize<uint8_t>(padValueBuffer[0],
                                                                 padValueTensorInfo.GetQuantizationScale(),
                                                                 padValueTensorInfo.GetQuantizationOffset());
                    break;
                }
                case armnn::DataType::QAsymmS8:
                case armnn::DataType::QSymmS8:
                {
                    std::vector<int8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = armnn::Dequantize<int8_t>(padValueBuffer[0],
                                                                padValueTensorInfo.GetQuantizationScale(),
                                                                padValueTensorInfo.GetQuantizationOffset());
                    break;
                }
                default: ARMNN_THROW_PARSE_EXCEPTION("Unsupported DataType");
            }
        }
        // Empty pad-value buffer: fall back to the zero point for quantized input.
        else if (inputTensorInfo.IsQuantized())
        {
            desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
        }
    }

    // Translate the flat (before, after) pairs into the descriptor's pad list.
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    auto layerName = (opcode == tflite::BuiltinOperator_PAD) ? fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex)
                                                             : fmt::format("PadV2:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor (input 0) becomes a layer input; paddings and the
    // pad value are constants consumed while building the descriptor.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2622
/// Lowers a TfLite MIRROR_PAD operator to an ArmNN Pad layer using the
/// Reflect or Symmetric padding mode, after validating that the requested
/// paddings are legal for the chosen mode.
void TfLiteParserImpl::ParseMirrorPad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    // Raw copy of the constant paddings tensor (flattened (before, after) pairs).
    std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
    ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());

    size_t step = 2; // each dimension contributes a (before, after) pair
    armnn::PadDescriptor desc;
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsMirrorPadOptions();

    // Map the TfLite mirror-pad mode onto the ArmNN padding mode.
    if (options->mode == tflite::MirrorPadMode_REFLECT)
    {
        desc.m_PaddingMode = PaddingMode::Reflect;
    }
    else if (options->mode == tflite::MirrorPadMode_SYMMETRIC)
    {
        desc.m_PaddingMode = PaddingMode::Symmetric;
    }
    else
    {
        ARMNN_THROW_PARSE_EXCEPTION("PaddingMode must be either REFLECT or SYMMETRIC");
    }

    // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
    // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
    auto inputShape = inputTensorInfo.GetShape();
    auto padList = desc.m_PadList;

    // isReflect is 1 for Reflect and 0 for Symmetric, tightening the bound by one.
    const unsigned int isReflect = static_cast<unsigned int>(desc.m_PaddingMode == PaddingMode::Reflect);
    for(unsigned int i = 0; i < padList.size(); ++i)
    {
        if(padList.at(i).first > (inputShape[i] - isReflect) ||
           padList.at(i).second > (inputShape[i] - isReflect))
        {
            ARMNN_THROW_PARSE_EXCEPTION("Padding values must be less (Reflect) or "
                                        "equal (Symmetric) to the dimension size.");
        }
    }

    auto layerName = fmt::format("MirrorPad:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor (input 0) is wired in; the paddings tensor was
    // consumed as a constant above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2693
/// Lowers a TfLite PRELU operator to an ArmNN Prelu layer.
/// Input 0 is the data tensor, input 1 the alpha (slope) tensor. A constant
/// alpha is materialised as an explicit Constant layer wired into the Prelu
/// layer's second input slot; a dynamic alpha is registered as a normal input.
void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo alphaTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);


    if (IsConstTensor(inputs[1]))
    {
        // Constant alpha: only the data tensor feeds slot 0 directly.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
        RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);

        // Materialise alpha as a Constant layer, converted to the data
        // tensor's type so both Prelu inputs agree.
        auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo,
                                                               inputTensorInfo.GetDataType());
        std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
        IConnectableLayer* constLayer =
                    m_Network->AddConstantLayer(alphaTensorAndData.first, constLayerName.c_str());
        ARMNN_ASSERT(constLayer != nullptr);

        constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
        constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
        // Publish the constant's output under alpha's tensor id so other
        // consumers of that tensor connect to this Constant layer.
        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            constLayer,
                            { inputTensorIndexes[1] });
    }
    else
    {
        // Dynamic alpha: both tensors are ordinary layer inputs.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
    }

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2746
Kevin May7d96b162021-02-03 17:38:41 +00002747void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00002748{
2749 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2750
2751 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2752 CHECK_VALID_SIZE(inputs.size(), 1);
2753
2754 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2755 CHECK_VALID_SIZE(outputs.size(), 1);
2756
James Ward58dec6b2020-09-11 17:32:44 +01002757 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002758
2759 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002760 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002761
Mike Kelly377fb212023-01-10 15:55:28 +00002762 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan66dedc72019-12-10 16:32:07 +00002763 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2764
2765 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2766 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2767
2768 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2769 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2770}
Finn Williamsc42c3842019-01-22 14:18:11 +00002771
Kevin May7d96b162021-02-03 17:38:41 +00002772void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01002773{
Finn Williamsc42c3842019-01-22 14:18:11 +00002774 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01002775}
2776
Kevin May7d96b162021-02-03 17:38:41 +00002777void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01002778{
Finn Williamsc42c3842019-01-22 14:18:11 +00002779 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
2780}
Sadik Armagan58f39192018-09-17 14:14:39 +01002781
Kevin May7d96b162021-02-03 17:38:41 +00002782void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan12239e72020-05-27 11:06:17 +01002783{
Jan Eilers2f746b32020-07-28 14:00:06 +01002784 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
Sadik Armagan12239e72020-05-27 11:06:17 +01002785}
2786
Kevin May7d96b162021-02-03 17:38:41 +00002787void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsc42c3842019-01-22 14:18:11 +00002788{
2789 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
2790}
2791
Kevin May7d96b162021-02-03 17:38:41 +00002792void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd99851762019-04-09 09:37:38 +01002793{
2794 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
2795}
2796
Kevin May7d96b162021-02-03 17:38:41 +00002797void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
Matthew Sloyan7515d072020-12-16 12:50:01 +00002798{
2799 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
2800}
2801
Kevin May7d96b162021-02-03 17:38:41 +00002802void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
Jan Eilers2f746b32020-07-28 14:00:06 +01002803{
2804 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
2805}
Finn Williamsc42c3842019-01-22 14:18:11 +00002806
/// Shared lowering for all TfLite activation operators (RELU, RELU6, SIGMOID,
/// TANH, LEAKY_RELU, ELU, HARD_SWISH). The concrete function is selected by
/// @p activationType; per-function descriptor parameters and the layer name
/// suffix are filled in by the switch below.
void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    // Only the LeakyReLu case reads the builtin options; silence the unused
    // warning for all other activation types.
    IgnoreUnused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Common prefix; each case appends "<TYPE>:<subgraph>:<operator>".
    auto layerName = fmt::format("Activation:");
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
            // RELU6 clamps to [m_B, m_A] = [0, 6].
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::TanH:
        {
            layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
            // Standard tanh: unit scale for both amplitude (m_A) and steepness (m_B).
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        case ActivationFunction::LeakyReLu:
        {
            layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
            // Negative-slope alpha comes from the operator's builtin options.
            const auto* options = operatorPtr->builtin_options.AsLeakyReluOptions();
            activationDesc.m_A = options->alpha;
            break;
        }
        case ActivationFunction::Elu:
        {
            layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
            activationDesc.m_A = 1.0f;
            break;
        }
        case ActivationFunction::HardSwish:
        {
            layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
                            static_cast<int>(activationType), CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
Mike Kelly0d77ae12022-01-07 17:42:27 +00002889armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo& inputTensorInfo,
2890 const std::vector<int32_t>& targetDimsIn)
Sadikb94967b2018-09-19 15:30:00 +01002891{
2892 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2893 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2894
2895 if (stretchDim != targetDimsIn.end())
2896 {
2897 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2898 {
2899 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002900 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01002901 }
2902
2903 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002904 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01002905 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2906
2907 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2908 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2909 }
2910
2911 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2912
2913 TensorInfo reshapeInfo = inputTensorInfo;
2914 reshapeInfo.SetShape(outputShape);
2915
2916 return reshapeInfo;
2917}
2918
/// Converts a TfLite RESHAPE operator into an ArmNN ReshapeLayer.
/// The target shape can arrive in two ways:
///  * via the operator's built-in options (new_shape), or
///  * via a constant second input tensor; if that tensor's data is not available
///    at parse time, the shape is inferred (only (batch, -1) or (-1) supported).
void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsReshapeOptions();
    auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");

    // Extracting new shape for the output
    // There are two ways it can be passed
    //  * First is to define the target shape in the operator built-in options
    //  * Second is to pass it as a second input tensor
    std::vector<int32_t> targetShape;
    bool targetShapeFound = false;
    // Check if built-in options were given
    if (options != nullptr)
    {
        // make sure the parameter is given
        if (options->new_shape.empty() == false)
        {
            targetShape = options->new_shape;
            targetShapeFound = true;
        }
    }

    // If there is no built-in option given or if the built-in new_shape parameter was empty
    if (!targetShapeFound)
    {
        // Check for a second input tensor
        if (inputs.size() > 1 && inputs[1] != nullptr)
        {
            if (inputs[1]->is_variable)
            {
                ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
            }

            if (inputs[1]->shape.size() != 1)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
            }

            if (inputs[1]->type != tflite::TensorType_INT32)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
            }

            // Extract target shape from input
            auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
            auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
            if (values)
            {
                // Constant shape tensor: read its values directly.
                for (int i = 0; i < inputs[1]->shape[0]; ++i)
                {
                    targetShape.push_back(values[i]);
                }
            }
            else
            {
                // Non-constant shape tensor (null buffer data): try to infer the target
                // shape from the declared output tensor instead.
                try
                {
                    // We attempt to infer during Runtime.
                    TensorShape reshapeShapes = ToTensorInfo(inputs[1]).GetShape();

                    // If the shape input's length matches the declared output rank,
                    // just adopt the declared output shape.
                    if (reshapeShapes[0] == actualOutputTensorInfo.GetNumDimensions())
                    {
                        for (unsigned int i = 0; i < actualOutputTensorInfo.GetShape().GetNumDimensions(); ++i)
                        {
                            targetShape.push_back(actualOutputTensorInfo.GetShape()[i]);
                        }
                    }
                    // The parser only supports shape (batch, -1) or (-1) for non-constant shape input.
                    else if (reshapeShapes[0] > 2)
                    {
                        throw ParseException(fmt::format("Invalid input shape '{}' in Reshape layer '{}' {}. "
                                                         "When inferring during runtime, the parser only supports "
                                                         "shape (batch, -1) or (-1) for target shape input.",
                                                         reshapeShapes[0],
                                                         layerName,
                                                         CHECK_LOCATION().AsString()));
                    }
                    else
                    {
                        // Shape input of length 1 maps to (-1): flatten to all elements.
                        // Length 2 maps to (batch, -1): keep the input's leading dim.
                        const int32_t numInputElements = inputTensorInfo.GetNumElements();
                        const int32_t inputTensorShape = inputTensorInfo.GetShape()[0];
                        if (reshapeShapes[0] == 1)
                        {
                            targetShape = {numInputElements};
                        }
                        else if (reshapeShapes[0] == 2)
                        {
                            targetShape = {inputTensorShape, numInputElements / inputTensorShape};
                        }
                    }
                }
                catch (const std::exception& exc)
                {
                    ARMNN_THROW_PARSE_EXCEPTION("Failed attempt to infer during runtime the target shape input for "
                                                "Reshape operation. Reshape operator target shape input buffer data "
                                                "is null. " << exc.what());
                }
            }
        }
        else
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }
    }

    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    // The output shape can be provided to us in 2 ways:
    // 1. through the normal 'shape' parameter given by outputs[indx]->shape
    // 2. through additional parameter 'shape_signature' given by outputs[indx]->buffer.
    // This parameter can sometimes contain -1 value not visible in the 'shape' parameter.
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        // Attempt to extract output shape from secondary 'shape_signature'
        // parameter and try to CheckShape() with this param.
        std::vector<int32_t> secondaryOutputTargetShape = outputs[0]->shape_signature;

        // if outputs[0]->shape_signature contain a -1 value, we need to compute its actual value
        // from reshape input in order to correctly verify reshape parameters equal output shape
        armnn::TensorInfo secondaryReshapeOutputTensorInfo =
            TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, secondaryOutputTargetShape);

        if (!CheckShape(reshapeOutputTensorShape, secondaryReshapeOutputTensorInfo.GetShape()))
        {
            std::stringstream ss;
            ss << "New shape defined in reshape parameters "
               << reshapeOutputTensorShape
               << " does not equal output shape "
               << actualOutputTensorInfo.GetShape()
               << ": "
               << CHECK_LOCATION().AsString();
            throw ParseException(ss.str());
        }
    }
    auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
    // Record the resolved output info so downstream operators can look it up by tensor id.
    m_TensorInfos[outputTensorIds[0]] = reshapeOutputTensorInfo;

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    // Connections are made after all layers have been created.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3085
/// Dispatches a TfLite RESIZE_BILINEAR operator to the shared resize handler.
void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
}
3090
/// Dispatches a TfLite RESIZE_NEAREST_NEIGHBOR operator to the shared resize handler.
void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
}
3095
Kevin May7d96b162021-02-03 17:38:41 +00003096void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
Sadik Armagana3b31f02019-12-05 09:08:53 +00003097{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003098 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3099
3100 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3101 CHECK_VALID_SIZE(inputs.size(), 2);
3102
3103 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3104 CHECK_VALID_SIZE(outputs.size(), 1);
3105
Mike Kelly377fb212023-01-10 15:55:28 +00003106 armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003107
3108 // Data for the parsed tensor args (size) must be stored locally.
3109 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
3110
3111 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3112 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
3113
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01003114 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00003115 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003116 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01003117 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
3118 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003119
James Ward58dec6b2020-09-11 17:32:44 +01003120 auto layerName = fmt::format("Resize:");
Sadik Armagana3b31f02019-12-05 09:08:53 +00003121
3122 switch (resizeMethod)
3123 {
3124 case ResizeMethod::Bilinear:
3125 {
James Ward58dec6b2020-09-11 17:32:44 +01003126 layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00003127
3128 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3129 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
3130
David Monahan4a0c9b92020-05-30 09:48:39 +01003131 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00003132 break;
3133 }
3134 case ResizeMethod::NearestNeighbor:
3135 {
James Ward58dec6b2020-09-11 17:32:44 +01003136 layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagana3b31f02019-12-05 09:08:53 +00003137 break;
3138 }
3139 default:
3140 {
3141 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003142 fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
3143 static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
Sadik Armagana3b31f02019-12-05 09:08:53 +00003144 }
3145 }
3146
Mike Kelly377fb212023-01-10 15:55:28 +00003147 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
James Conroy05102392020-06-24 15:39:55 +01003148
3149 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
3150 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00003151 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
3152 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003153 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3154
3155 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3156 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3157
3158 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3159 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3160}
3161
Kevin May7d96b162021-02-03 17:38:41 +00003162void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan479045b2018-10-01 11:51:37 +01003163{
3164 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3165
Mike Kelly0d77ae12022-01-07 17:42:27 +00003166 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3167 const auto* options = operatorPtr->builtin_options.AsConcatenationOptions();
Sadik Armagan479045b2018-10-01 11:51:37 +01003168
3169 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
3170
3171 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3172 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Mike Kelly377fb212023-01-10 15:55:28 +00003173 auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
3174
Sadik Armagan479045b2018-10-01 11:51:37 +01003175 CHECK_VALID_SIZE(outputs.size(), 1);
3176
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003177 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
Mike Kelly377fb212023-01-10 15:55:28 +00003178 uint32_t inputRank = InputTensorInfo(subgraphIndex, operatorIndex, 0).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01003179
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003180 const unsigned int concatDimInput = static_cast<unsigned int>(
Mike Kelly377fb212023-01-10 15:55:28 +00003181 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01003182
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003183 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
3184 concatDescriptor.SetConcatAxis(concatDimInput);
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003185 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01003186
3187 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
3188 {
Mike Kelly377fb212023-01-10 15:55:28 +00003189 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, viewIndex);
Sadik Armagan479045b2018-10-01 11:51:37 +01003190
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003191 // This set up concatDescriptor view origin
3192 armnnUtils::ProcessConcatInputTensorInfo(
Mike Kelly377fb212023-01-10 15:55:28 +00003193 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01003194 }
3195
James Ward58dec6b2020-09-11 17:32:44 +01003196 auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01003197
Jim Flynn906f9462019-05-10 13:55:21 +01003198 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003199 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00003200 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003201 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01003202
James Conroy05102392020-06-24 15:39:55 +01003203 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003204 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01003205
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003206 // add fused activation layer
3207 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01003208
Sadik Armagan479045b2018-10-01 11:51:37 +01003209 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3210 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3211}
3212
/// Converts a TfLite FULLY_CONNECTED operator into an ArmNN FullyConnectedLayer.
/// Handles: optional bias (third input), non-2D inputs (flattened via a leading
/// Reshape layer), non-2D declared outputs (restored via a trailing Reshape),
/// constant-weight dequantization bookkeeping, and a fused activation.
void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_TransposeWeightMatrix = true;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Fully Connected Layer accepts two dimensional weights input
    int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
    if (weightsDimension != 2)
    {
        throw ParseException(
            fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
                        "Node {}",
                        weightsDimension,
                        CHECK_LOCATION().AsString()));
    }

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input tensor to the registration list
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    desc.m_ConstantWeights = IsConstTensor(inputs[1]);

    // Add the weights input to the registration list, constant layers will be added by SetupConstantLayers if constant.
    tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);

    // Mark quantized constant weights for later dequantization when the data types require it.
    if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
    {
        m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
    }

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        // Add the biases input to the registration list, constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);

        if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
        {
            m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
        }
    }

    // Filters and biases are always passed to fully connected as inputs
    layer = m_Network->AddFullyConnectedLayer(desc, layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    unsigned int startingSlotIndex = 0;
    if (inputTensorInfo.GetNumDimensions() > 2)
    {
        // Add reshape to flatten to 2D [batch_size, input_size],
        // where "input_size" corresponds to the number of inputs to the layer,
        // matching the second dimension of weights,
        // and "batch_size" is calculated by dividing the number of elements by "input_size".
        std::vector<unsigned int> reshapedDimensions(2);
        reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
        reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];

        if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
        {
            throw ParseException(
                fmt::format("Failed to deduce input tensor shape from filter size {} {}",
                            reshapedDimensions[1],
                            CHECK_LOCATION().AsString()));
        }

        armnn::TensorInfo reshapedTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
        reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
        inputTensorInfo = reshapedTensorInfo;

        std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor,
                                                                            reshapeLayerName.c_str());

        // Route the original input through the reshape layer into the FC layer.
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
        reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

        RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
        // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
        tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
        startingSlotIndex = 1;
    }

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromShapes(subgraphIndex, operatorIndex, layer, 0,
                                                                    { inputTensorInfo.GetShape(),
                                                                      filterTensorInfo.GetShape() });

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    if (outputTensorInfo.GetNumDimensions() > 2)
    {
        // Calculate reshape to flatten to 2D [batch_size, input_size]
        std::vector<unsigned int> reshapedDimensions(2);
        reshapedDimensions[1] = filterTensorInfo.GetShape()[0];
        reshapedDimensions[0] = outputTensorInfo.GetNumElements() / reshapedDimensions[1];
        armnn::TensorInfo reshapedOutputTensorInfo = outputTensorInfo;
        if (outputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
        {
            throw ParseException(
                fmt::format("Failed to deduce output tensor shape from filter size {} {}",
                            reshapedDimensions[1],
                            CHECK_LOCATION().AsString()));
        }
        reshapedOutputTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
        layer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);

        // Restore the declared (higher-rank) output shape after the 2D FC computation.
        std::string reshapeLayerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
        layer = AddReshapeLayer(layer, 0, reshapeLayerName, outputTensorInfo);
    }

    // we need to add the activation layer and fortunately we don't need to care about the data layout
    armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
                                                                             options->fused_activation_function);

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});

    // Record the pre-activation layer's output info for downstream shape inference.
    m_TensorInfos[outputTensorIndexes[0]] = layer->GetOutputSlot(0).GetTensorInfo();
}
3357
/// Converts the TfLite custom operator TFLite_Detection_PostProcess into an ArmNN
/// DetectionPostProcessLayer. The operator's parameters arrive as a flexbuffers
/// map in custom_options; the anchors (input 2) are consumed as a constant tensor.
/// Produces 4 outputs: boxes, classes, scores, and the number of detections.
void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two keys are optional in the flexbuffers map; keep the descriptor
    // defaults when they are absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Anchors (input 2) are baked into the layer as a constant tensor.
    armnn::TensorInfo anchorTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);

    auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
                                                                       layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    // Push order matters: it must match the operator's output order
    // (boxes, classes, scores, num_detections).
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverriddenOutputShapes.push_back({ 1, numDetectedBox, 4 });
    m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverriddenOutputShapes.push_back({ 1 });

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverriddenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                             outputTensorIndexes[1],
                                                             outputTensorIndexes[2],
                                                             outputTensorIndexes[3]});
}
3434
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003435/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00003436void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003437{
3438 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3439
3440 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3441 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3442 CHECK_VALID_SIZE(outputs.size(), 1);
3443
3444 if (inputs.size() < 1)
3445 {
3446 throw ParseException("Pack must have at least one input.");
3447 }
3448
3449 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3450 const auto* options = operatorPtr->builtin_options.AsPackOptions();
3451
3452 StackDescriptor desc;
3453 desc.m_Axis = static_cast<uint32_t>(options->axis);
3454 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
3455
3456 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00003457 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003458 desc.m_InputShape = inputTensorInfo.GetShape();
3459
James Ward58dec6b2020-09-11 17:32:44 +01003460 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003461 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
3462
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003463 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003464
Mike Kelly377fb212023-01-10 15:55:28 +00003465 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003466 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3467
3468 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3469 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
3470
3471 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3472 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3473}
3474
Mike Kelly5880b912022-01-28 16:18:54 +00003475void TfLiteParserImpl::ParseUnidirectionalSequenceLSTM(size_t subgraphIndex, size_t operatorIndex)
3476{
3477 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3478
3479 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3480 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3481
3482 if (inputs.size() < 2)
3483 {
3484 throw ParseException("UnidirectionalSequenceLSTM must have at least 2 input.");
3485 }
3486
3487 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3488 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
3489 const auto nodeParams = operatorPtr->builtin_options.AsUnidirectionalSequenceLSTMOptions();
3490 CHECK_SUPPORTED_FUSED_ACTIVATION(nodeParams, subgraphIndex, operatorIndex);
Mike Kelly377fb212023-01-10 15:55:28 +00003491 auto inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly5880b912022-01-28 16:18:54 +00003492 auto outputTensorInfo = ToTensorInfo(outputs[0]);
3493
3494 // Set the params structure for the AddUnidirectionalSequenceLstmLayer call
3495 // Please refer to each operand at
3496 // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop
3497 armnn::LstmInputParams params;
3498
3499 if (IsOptionalOperandPresent(operatorPtr->inputs[1]))
3500 {
3501 params.m_InputToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[1]].get(),
3502 inputTensorInfo).first;
3503 }
3504
3505 params.m_InputToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[2]].get(),
3506 inputTensorInfo).first;
3507 params.m_InputToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[3]].get(),
3508 inputTensorInfo).first;
3509 params.m_InputToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[4]].get(),
3510 inputTensorInfo).first;
3511
3512 // Recurrent weight tensors of size {n_cell, n_output}
3513 if (IsOptionalOperandPresent(operatorPtr->inputs[5]))
3514 {
3515 params.m_RecurrentToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[5]].get(),
3516 inputTensorInfo).first;
3517 }
3518
3519 params.m_RecurrentToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[6]].get(),
3520 inputTensorInfo).first;
3521 params.m_RecurrentToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[7]].get(),
3522 inputTensorInfo).first;
3523 params.m_RecurrentToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[8]].get(),
3524 inputTensorInfo).first;
3525
3526 // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
3527 if (IsOptionalOperandPresent(operatorPtr->inputs[9]))
3528 {
3529 params.m_CellToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[9]].get(),
3530 inputTensorInfo).first;
3531 }
3532
3533 if (IsOptionalOperandPresent(operatorPtr->inputs[10]))
3534 {
3535 params.m_CellToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[10]].get(),
3536 inputTensorInfo).first;
3537 }
3538
3539 if (IsOptionalOperandPresent(operatorPtr->inputs[11]))
3540 {
3541 params.m_CellToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[11]].get(),
3542 inputTensorInfo).first;
3543 }
3544
3545 // Gates bias tensors of size {n_cell}
3546 if (IsOptionalOperandPresent(operatorPtr->inputs[12]))
3547 {
3548 params.m_InputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[12]].get(),
3549 inputTensorInfo).first;
3550 }
3551
3552 params.m_ForgetGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[13]].get(),
3553 inputTensorInfo).first;
3554 params.m_CellBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[14]].get(),
3555 inputTensorInfo).first;
3556 params.m_OutputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[15]].get(),
3557 inputTensorInfo).first;
3558
3559 // Projection weight tensor of size {n_output, n_cell}
3560 if (IsOptionalOperandPresent(operatorPtr->inputs[16]))
3561 {
3562 params.m_ProjectionWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[16]].get(),
3563 inputTensorInfo).first;
3564 }
3565 // Projection bias tensor of size {n_output}
3566 if (IsOptionalOperandPresent(operatorPtr->inputs[17]))
3567 {
3568 params.m_ProjectionBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[17]].get(),
3569 inputTensorInfo).first;
3570 }
3571
3572 // These state tensors are defined as variable tensors, and will be modified by this op.
3573 armnn::TensorInfo outputStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[18]].get());
3574 m_ConstantsToBeCreated.push_back(operatorPtr->inputs[18]);
3575 armnn::TensorInfo cellStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[19]].get());
3576 m_ConstantsToBeCreated.push_back(operatorPtr->inputs[19]);
3577
3578 // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
3579 if (inputs.size() >= 21 && IsOptionalOperandPresent(operatorPtr->inputs[20]))
3580 {
3581 params.m_InputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[20]].get(),
3582 inputTensorInfo).first;
3583 }
3584
3585 if (inputs.size() >= 22 && IsOptionalOperandPresent(operatorPtr->inputs[21]))
3586 {
3587 params.m_ForgetLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[21]].get(),
3588 inputTensorInfo).first;
3589 }
3590
3591 if (inputs.size() >= 23 && IsOptionalOperandPresent(operatorPtr->inputs[22]))
3592 {
3593 params.m_CellLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[22]].get(),
3594 inputTensorInfo).first;
3595 }
3596
3597 if (inputs.size() >= 24 && IsOptionalOperandPresent(operatorPtr->inputs[23]))
3598 {
3599 params.m_OutputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[23]].get(),
3600 inputTensorInfo).first;
3601 }
3602
3603 // set the layer descriptor
3604 armnn::UnidirectionalSequenceLstmDescriptor desc;
3605 desc.m_ActivationFunc = nodeParams->fused_activation_function;
3606 desc.m_ClippingThresCell = nodeParams->cell_clip;
3607 desc.m_ClippingThresProj = nodeParams->proj_clip;
3608 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
3609 || params.m_RecurrentToInputWeights == nullptr
3610 || params.m_InputGateBias == nullptr);
3611 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
3612 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
3613 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
3614 || params.m_ForgetLayerNormWeights != nullptr
3615 || params.m_CellLayerNormWeights != nullptr
3616 || params.m_OutputLayerNormWeights != nullptr);
3617 desc.m_TimeMajor = nodeParams->time_major;
3618
Mike Kellyc0800a32022-06-15 10:57:52 +01003619 if (operatorPtr->intermediates.size() > 3 && desc.m_LayerNormEnabled)
Mike Kelly5880b912022-01-28 16:18:54 +00003620 {
3621 auto inputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[0]].get(),
3622 inputTensorInfo).first;
3623 auto inputIntermediateTensorInfo = inputIntermediate->GetInfo();
3624 desc.m_InputIntermediateScale = inputIntermediateTensorInfo.GetQuantizationScale();
3625
3626 auto forgetIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[1]].get(),
3627 inputTensorInfo).first;
3628 auto forgetIntermediateTensorInfo = forgetIntermediate->GetInfo();
3629 desc.m_ForgetIntermediateScale = forgetIntermediateTensorInfo.GetQuantizationScale();
3630
3631 auto cellIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[2]].get(),
3632 inputTensorInfo).first;
3633 auto cellIntermediateTensorInfo = cellIntermediate->GetInfo();
3634 desc.m_CellIntermediateScale = cellIntermediateTensorInfo.GetQuantizationScale();
3635
3636 auto outputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[3]].get(),
3637 inputTensorInfo).first;
3638 auto outputIntermediateTensorInfo = outputIntermediate->GetInfo();
3639 desc.m_OutputIntermediateScale = outputIntermediateTensorInfo.GetQuantizationScale();
3640 }
3641 else
3642 {
3643 float defaultIntermediate = std::pow(2, -12);
3644 desc.m_InputIntermediateScale = defaultIntermediate;
3645 desc.m_ForgetIntermediateScale = defaultIntermediate;
3646 desc.m_CellIntermediateScale = defaultIntermediate;
3647 desc.m_OutputIntermediateScale = defaultIntermediate;
3648 }
3649
Mike Kellyc0800a32022-06-15 10:57:52 +01003650 if (operatorPtr->intermediates.size() > 4)
3651 {
3652 auto hiddentensor = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[4]].get(),
3653 inputTensorInfo).first;
Mike Kelly5880b912022-01-28 16:18:54 +00003654
Mike Kellyc0800a32022-06-15 10:57:52 +01003655 desc.m_HiddenStateScale = hiddentensor->GetInfo().GetQuantizationScale();
3656 desc.m_HiddenStateZeroPoint = hiddentensor->GetInfo().GetQuantizationOffset();
3657 }
Mike Kelly5880b912022-01-28 16:18:54 +00003658 unsigned int batchSize = inputTensorInfo.GetShape()[0];
3659 unsigned int outputSize = outputTensorInfo.GetShape()[2];
3660 unsigned int numUnits = cellStateInInfo.GetShape()[1];
3661
3662 armnn::DataType dataType = inputTensorInfo.GetDataType();
3663 float qScale = inputTensorInfo.GetQuantizationScale();
3664 float qOffset = inputTensorInfo.GetQuantizationOffset();
3665
3666 armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
3667 if (!desc.m_CifgEnabled)
3668 {
3669 scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
3670 }
3671 armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits},
3672 cellStateInInfo.GetDataType(),
3673 cellStateInInfo.GetQuantizationScale(),
3674 cellStateInInfo.GetQuantizationOffset());
3675 armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
3676
3677 armnn::LstmInputParamsInfo paramsInfo;
3678 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
3679 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
3680 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
3681 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
3682 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
3683 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
3684 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
3685 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
3686 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
3687
3688 if (!desc.m_CifgEnabled)
3689 {
3690 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
3691 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
3692 if (params.m_CellToInputWeights != nullptr)
3693 {
3694 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
3695 }
3696 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
3697 }
3698
3699 if (desc.m_ProjectionEnabled)
3700 {
3701 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
3702 if (params.m_ProjectionBias != nullptr)
3703 {
3704 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
3705 }
3706 }
3707
3708 if (desc.m_PeepholeEnabled)
3709 {
3710 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
3711 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
3712 }
3713
3714 if (desc.m_LayerNormEnabled)
3715 {
3716 if(!desc.m_CifgEnabled)
3717 {
3718 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
3719 }
3720 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
3721 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
3722 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
3723 }
3724
3725 auto layerName = fmt::format("UnidirectionalSequenceLSTM:{}:{}", subgraphIndex, operatorIndex);
3726 armnn::IConnectableLayer* layer = m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
3727 ARMNN_ASSERT(layer != nullptr);
3728
3729 // register the input connection slots for the layer, connections are made after all layers have been created
3730 // only the tensors for the inputs are relevant, exclude the const tensors
3731 auto inputTensorIndexes = AsUnsignedVector({operatorPtr->inputs[0],
3732 operatorPtr->inputs[18],
3733 operatorPtr->inputs[19]});
3734 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0],
3735 inputTensorIndexes[1],
3736 inputTensorIndexes[2]});
3737
3738 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3739
3740 layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
3741 layer->GetOutputSlot(1).SetTensorInfo(cellStateOutTensorInfo);
3742 layer->GetOutputSlot(2).SetTensorInfo(outputTensorInfo);
3743
3744 unsigned int tensorIndex = outputTensorIndexes[0];
3745 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(2));
3746 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3747}
3748
Kevin May7d96b162021-02-03 17:38:41 +00003749void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01003750{
3751 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3752
Mike Kelly0d77ae12022-01-07 17:42:27 +00003753 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3754 const auto* options = operatorPtr->builtin_options.AsUnpackOptions();
Nina Drozd200e3802019-04-15 09:47:39 +01003755
3756 // This unpackAxis indicates the axis to unpack
3757 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
3758
3759 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3760 CHECK_VALID_SIZE(inputs.size(), 1);
3761
Mike Kelly377fb212023-01-10 15:55:28 +00003762 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003763
3764 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
3765 {
3766 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003767 fmt::format("The unpack axis: {} cannot be greater than or equal to "
3768 "the number of input dimension {} {}",
3769 unpackAxis,
3770 inputTensorInfo.GetNumDimensions(),
3771 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003772 }
3773
Nina Drozd200e3802019-04-15 09:47:39 +01003774 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
3775 // If num is not defined, automatically infer from the length of the dimension axis.
3776 if(unpackNum == 0)
3777 {
3778 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
3779 }
3780
3781 // If unpack number cannot be inferred and is still zero, throw ParseException.
3782 if(unpackNum == 0)
3783 {
3784 throw ParseException("Number to unpack must greater than zero.");
3785 }
3786
3787 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3788 CHECK_VALID_SIZE(outputs.size(), unpackNum);
3789
3790 auto inputDimSize = inputTensorInfo.GetNumDimensions();
3791 std::vector<unsigned int> unpackDimSizes(inputDimSize);
3792
3793 // Add current input shape to unpackDimSizes
3794 for (unsigned int i = 0; i < inputDimSize; ++i)
3795 {
3796 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
3797 }
3798
3799 if (unpackDimSizes[unpackAxis] != unpackNum)
3800 {
3801 throw ParseException("Number to unpack must be the same as length of the dimension to "
3802 "unpack along.");
3803 }
3804
3805 unpackDimSizes[unpackAxis] /= unpackNum;
3806
3807 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
3808 for (unsigned int j = 0; j < unpackNum; ++j)
3809 {
3810 // Set the size of the views.
3811 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
3812 {
3813 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
3814 }
3815 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
3816 }
3817
James Ward58dec6b2020-09-11 17:32:44 +01003818 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01003819 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003820 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01003821
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003822 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
3823 unpackDimSizes.data());
3824
Nina Drozd200e3802019-04-15 09:47:39 +01003825 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3826 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3827
Finn Williamsb49ed182021-06-29 15:50:08 +01003828 std::vector<unsigned int> reshapeDims;
3829 for (unsigned int axis = 0; axis < splitOutShape.GetNumDimensions(); ++axis)
3830 {
3831 if (axis != unpackAxis)
3832 {
3833 reshapeDims.push_back(splitOutShape[axis]);
3834 }
3835 }
3836
3837 TensorShape reshapeOutputShape(splitOutShape.GetNumDimensions() -1, reshapeDims.data());
3838
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003839 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
3840 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3841 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003842 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01003843 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003844 armnn::ReshapeDescriptor desc;
Finn Williamsb49ed182021-06-29 15:50:08 +01003845 desc.m_TargetShape = reshapeOutputShape;
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003846 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
3847
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01003848 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
3849 outputTensorInfo.GetDataType(),
3850 outputTensorInfo.GetQuantizationScale(),
3851 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003852 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
3853
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01003854 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003855
3856 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
3857 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
3858 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
3859 }
Nina Drozd200e3802019-04-15 09:47:39 +01003860}
3861
Kevin May7d96b162021-02-03 17:38:41 +00003862void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd0324f482019-04-08 10:52:10 +01003863{
3864 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3865
Mike Kelly0d77ae12022-01-07 17:42:27 +00003866 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3867 const auto* options = operatorPtr->builtin_options.AsSplitOptions();
Nina Drozd0324f482019-04-08 10:52:10 +01003868
3869 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
3870
Nina Drozd200e3802019-04-15 09:47:39 +01003871 // If number of splits cannot be inferred and is zero, throw ParseException.
3872 if(numSplits == 0)
3873 {
3874 throw ParseException("Number to splits must greater than zero.");
3875 }
3876
Nina Drozd0324f482019-04-08 10:52:10 +01003877 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3878 CHECK_VALID_SIZE(inputs.size(), 2);
3879 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3880 CHECK_VALID_SIZE(outputs.size(), numSplits);
3881
Mike Kelly377fb212023-01-10 15:55:28 +00003882 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
3883 armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003884 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
Nina Drozd0324f482019-04-08 10:52:10 +01003885
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003886 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003887 if (axisBufferPtr == nullptr)
3888 {
3889 throw ParseException(
3890 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
3891 CHECK_LOCATION().AsString()));
3892 }
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003893
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003894 std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
3895 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
3896 int32_t axis = axisData[0];
3897
3898 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
3899 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
3900 {
3901 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
3902 // E.g. Rank 4 tensor can have axis in range [-4, 3)
3903 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
3904 throw ParseException(
3905 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
3906 axis,
3907 CHECK_LOCATION().AsString()));
3908 }
3909
3910 const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
Nina Drozd0324f482019-04-08 10:52:10 +01003911
Nina Drozd0324f482019-04-08 10:52:10 +01003912 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003913 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01003914 {
3915 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003916 fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
3917 inputTensorInfo.GetNumDimensions(),
3918 MaxNumOfTensorDimensions,
3919 CHECK_LOCATION().AsString()));
Nina Drozd0324f482019-04-08 10:52:10 +01003920 }
3921
3922 std::vector<unsigned int> splitterDimSizes(inputDimSize);
3923
3924 // Add current input shape to splitterDimSizes
3925 for (unsigned int i = 0; i < inputDimSize; ++i)
3926 {
3927 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
3928 }
3929
3930 if (splitterDimSizes[splitDim] % numSplits != 0)
3931 {
3932 throw ParseException("Number of splits must evenly divide the dimension");
3933 }
3934 splitterDimSizes[splitDim] /= numSplits;
3935
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003936 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01003937 for (unsigned int j = 0; j < numSplits; ++j)
3938 {
3939 // Set the size of the views.
3940 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
3941 {
3942 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
3943 }
3944 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
3945 }
3946
James Ward58dec6b2020-09-11 17:32:44 +01003947 auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd0324f482019-04-08 10:52:10 +01003948 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003949 ARMNN_ASSERT(layer != nullptr);
Nina Drozd0324f482019-04-08 10:52:10 +01003950
3951 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003952 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01003953
Nina Drozd0324f482019-04-08 10:52:10 +01003954 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3955 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003956 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01003957 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01003958 }
3959
3960 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3961 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3962}
3963
Derek Lambertif0176992020-04-28 13:37:49 +01003964unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
3965{
3966 int numDims = armnn::numeric_cast<int>(numDimsIn);
3967 int v = idx < 0 ? numDims + idx : idx;
3968 ARMNN_ASSERT(v >= 0);
3969 ARMNN_ASSERT(v < numDims);
3970
3971 return static_cast<unsigned int>(v);
3972}
3973
Kevin May7d96b162021-02-03 17:38:41 +00003974void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01003975{
3976 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3977
Mike Kelly0d77ae12022-01-07 17:42:27 +00003978 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3979 const auto* options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01003980
3981 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3982 CHECK_VALID_SIZE(inputs.size(), 3);
3983
3984 auto& inputTensor = inputs[0];
3985 auto& splitsTensor = inputs[1];
3986 auto& axisTensor = inputs[2];
3987
3988 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
3989 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
3990 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
3991 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
3992
3993 // Inputs
3994 auto inputDimSize = inputTensorInfo.GetNumDimensions();
3995 if (inputDimSize > MaxNumOfTensorDimensions)
3996 {
3997 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003998 fmt::format("The number of dimensions: {} for input tensors of the "
3999 "SplitV op cannot be greater than {} {}",
4000 inputTensorInfo.GetNumDimensions(),
4001 MaxNumOfTensorDimensions,
4002 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01004003 }
4004
4005 // Get split axis
4006 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004007 if (axisBufferPtr == nullptr)
4008 {
4009 throw ParseException(
4010 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
4011 CHECK_LOCATION().AsString()));
4012 }
4013
Derek Lambertif0176992020-04-28 13:37:49 +01004014 std::vector<int> axisData(axisTensorInfo.GetNumElements());
4015 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004016 int32_t axis = axisData[0];
4017
4018 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
4019 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
4020 {
4021 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
4022 // E.g. Rank 4 tensor can have axis in range [-4, 3)
4023 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
4024 throw ParseException(
4025 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
4026 axis,
4027 CHECK_LOCATION().AsString()));
4028 }
4029 const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
Derek Lambertif0176992020-04-28 13:37:49 +01004030
Derek Lambertif0176992020-04-28 13:37:49 +01004031 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01004032 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01004033 unsigned int numSplits{0};
4034
4035 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01004036 {
4037 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01004038 }
4039 else
4040 {
Ryan OShea86704732020-05-26 11:41:04 +01004041 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01004042 }
4043
4044 if (numSplits <=0)
4045 {
4046 throw ParseException("SplitV has invalid number of splits");
4047 }
4048
Jan Eilersc0761e92020-06-29 16:48:44 +01004049 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01004050 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01004051 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01004052
Jan Eilersc0761e92020-06-29 16:48:44 +01004053 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01004054 int numInferred{0};
4055 unsigned int inferIdx{0};
4056 int splitSum{0};
4057 for (auto split : splitsData)
4058 {
4059 if (split < 0)
4060 {
4061 numInferred++;
4062 inferIdx = idx;
4063 }
4064 else
4065 {
4066 splitSum += split;
4067 }
4068 idx++;
4069 }
4070 // Check for inferred Axis
4071 if (numInferred == 0)
4072 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004073 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01004074 {
4075 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
4076 }
4077 }
4078 else if (numInferred == 1)
4079 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004080 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01004081 }
4082 else
4083 {
4084 throw ParseException("Cannot infer split size for more than one split");
4085 }
4086
Derek Lambertif0176992020-04-28 13:37:49 +01004087 //Ouput size validation
4088 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4089 CHECK_VALID_SIZE(outputs.size(), numSplits);
4090
4091 // Setup Armnn descriptor
4092 SplitterDescriptor splitDesc(numSplits, inputDimSize);
4093 unsigned int accumSplit = 0;
4094 for (unsigned int j = 0; j < numSplits; ++j)
4095 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004096 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01004097
4098 // Set the size of the views.
4099 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
4100 {
4101 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
4102 if (dimIdx == splitDim)
4103 {
4104 dimSize = splitSize;
4105 }
4106 splitDesc.SetViewSize(j, dimIdx, dimSize);
4107 }
4108
4109 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
4110 accumSplit += splitSize;
4111 }
4112
James Ward58dec6b2020-09-11 17:32:44 +01004113 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01004114 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01004115 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01004116
4117 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4118 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4119
4120 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4121 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01004122 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01004123 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
4124 }
4125
4126 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4127 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4128}
4129
// Handler for the TFLite ARG_MIN builtin: delegates to the shared
// ArgMin/ArgMax implementation with the Min function.
void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
}

// Handler for the TFLite ARG_MAX builtin: delegates to the shared
// ArgMin/ArgMax implementation with the Max function.
void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
{
    ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
}
4139
// Converts a TFLite ARG_MIN / ARG_MAX operator into an Arm NN ArgMinMax layer.
// Expects two inputs: [0] the data tensor and [1] a single-element constant
// axis tensor, and one output whose data type must be Signed32 or Signed64.
// Throws ParseException on an unsupported output type, a non-constant axis,
// or an out-of-range axis.
void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    // The axis tensor must hold exactly one scalar value.
    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);

    // Check if output tensor type is Signed32 or Signed64
    if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
        outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
    {
        throw ParseException(
            fmt::format(
                "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
                CHECK_LOCATION().AsString()));
    }

    // Get const axis value from model and set it to descriptor.
    // A null buffer means the axis is not a compile-time constant, which the
    // parser cannot handle.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData.front();

    // Negative axis values are allowed and wrap from the back, mirroring
    // TFLite semantics; the descriptor receives the value unchanged.
    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    ArgMinMaxDescriptor desc;
    desc.m_Axis = axis;
    desc.m_Function = argMinMaxFunction;

    // Register a ArgMin/ArgMax layer.
    auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
    auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
    IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);
    // Output shape may depend on the (possibly dynamic) input shape, so it is
    // re-derived from the connected inputs rather than taken from the model.
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
4209
// Converts a TFLite GATHER operator into an Arm NN Gather layer.
// Inputs: [0] params tensor, [1] indices tensor. Output rank must equal
// rank(params) + rank(indices) - 1 per the gather contract; the axis comes
// from the operator's GatherOptions and may be negative (wrapping).
void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    armnn::GatherDescriptor gatherDescriptor;

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsGatherOptions();
    auto axis = options->axis;

    auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
    auto outputDimensions = outputTensorInfo.GetNumDimensions();
    // Axis must lie in [-rank, rank); negative values index from the back.
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        throw ParseException(
            fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
                        axis,
                        inputDimensions, inputDimensions,
                        CHECK_LOCATION().AsString()));
    }
    // Gather's output rank is fully determined by its input ranks; reject
    // models whose declared output rank disagrees.
    if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
    {
        throw ParseException(
            fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
                        outputDimensions,
                        inputDimensions, indicesDimensions,
                        CHECK_LOCATION().AsString()));
    }

    gatherDescriptor.m_Axis = axis;

    IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    // Re-derive the output info from both connected inputs (supports dynamic shapes).
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
4264
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004265void TfLiteParserImpl::ParseGatherNd(size_t subgraphIndex, size_t operatorIndex)
4266{
4267 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4268
4269 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4270 CHECK_VALID_SIZE(inputs.size(), 2);
4271 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4272 CHECK_VALID_SIZE(outputs.size(), 1);
4273
Mike Kelly377fb212023-01-10 15:55:28 +00004274 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4275 armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004276
4277 auto layerName = fmt::format("GatherNd:{}:{}", subgraphIndex, operatorIndex);
4278 IConnectableLayer* layer = m_Network->AddGatherNdLayer(layerName.c_str());
4279 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00004280 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004281 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4282
4283 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4284 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
4285
4286 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4287 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4288}
4289
Kevin May7d96b162021-02-03 17:38:41 +00004290void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00004291{
4292 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4293
Kevin May7d96b162021-02-03 17:38:41 +00004294 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004295 CHECK_VALID_SIZE(inputs.size(), 1);
Kevin May7d96b162021-02-03 17:38:41 +00004296 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004297 CHECK_VALID_SIZE(outputs.size(), 1);
4298
4299 armnn::DepthToSpaceDescriptor descriptor;
4300
Mike Kelly0d77ae12022-01-07 17:42:27 +00004301 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4302 const auto* options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
Sadik Armagan26868492021-01-22 14:25:31 +00004303 auto blockSize = options->block_size;
4304 if (blockSize < 2)
4305 {
4306 throw ParseException(
4307 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
4308 blockSize,
4309 CHECK_LOCATION().AsString()));
4310 }
4311 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
4312
4313 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
4314 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
4315 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00004316 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan26868492021-01-22 14:25:31 +00004317 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4318
4319 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4320 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4321
4322 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4323 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4324}
4325
// Thin dispatch wrappers: each TFLite reduction builtin maps onto the shared
// ParseReduce implementation with the corresponding armnn::ReduceOperation.
void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
}

void TfLiteParserImpl::ParseReduceProd(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Prod);
}

void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
}

void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
}
4345
// Shared implementation for the TFLite reduction builtins (SUM, PROD, MAX, MIN).
// Inputs: [0] the data tensor, [1] the axis tensor. If the axis tensor is a
// model constant its values select the reduced dimensions (negative values
// wrap); otherwise the reduction is applied over every dimension.
// keep_dims is taken from the operator's ReducerOptions.
void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsReducerOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo0 = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo inputTensorInfo1 = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    ReduceDescriptor desc;
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    // Get const axis value from model and set it to descriptor.
    if (axisBufferPtr != nullptr)
    {
        std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
        ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());

        // Convert the axis to unsigned int and remove duplicates.
        // (i + rank) % rank maps negative axes onto their positive equivalents;
        // the std::set both deduplicates and sorts them.
        auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
        std::set<unsigned int> uniqueAxis;
        std::transform(axisData.begin(),
                       axisData.end(),
                       std::inserter(uniqueAxis, uniqueAxis.begin()),
                       [rank](int i)->unsigned int{
                           return static_cast<uint32_t>(((i + rank) % rank)); });
        desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
    }
    else
    {
        // No constant axis available: reduce over all input dimensions.
        for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
        {
            desc.m_vAxis.push_back(i);
        }
    }

    desc.m_KeepDims = options->keep_dims;
    desc.m_ReduceOperation = reduceOperation;

    // Register a new Reduce layer.
    IConnectableLayer* layer = m_Network->AddReduceLayer(desc, layerName.c_str());

    // Output info is re-derived from the connected input (dynamic shape support).
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
4407
Mike Kelly31dce2b2021-09-01 21:22:37 +01004408void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex)
4409{
4410 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4411
4412 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4413 CHECK_VALID_SIZE(inputs.size(), 1);
4414
4415 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4416 CHECK_VALID_SIZE(outputs.size(), 1);
4417
4418 auto layerName = fmt::format("LRN:{}:{}", subgraphIndex, operatorIndex);
4419 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4420
Mike Kelly377fb212023-01-10 15:55:28 +00004421 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly31dce2b2021-09-01 21:22:37 +01004422
4423 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4424 const auto* options = operatorPtr->builtin_options.AsLocalResponseNormalizationOptions();
4425
4426 armnn::NormalizationDescriptor descriptor;
4427 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4428 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
4429 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
4430 descriptor.m_NormSize = static_cast<uint32_t>(options->radius);
4431 descriptor.m_K = options->bias;
4432 descriptor.m_Alpha = options->alpha;
4433 descriptor.m_Beta = options->beta;
4434
4435 // ArmNN expects normSize to be the full size of the normalization
4436 // window rather than the radius as in TfLite.
4437 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
4438
4439 IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
4440 ARMNN_ASSERT(layer != nullptr);
4441
Mike Kelly377fb212023-01-10 15:55:28 +00004442 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Mike Kelly31dce2b2021-09-01 21:22:37 +01004443 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4444
4445 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4446 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4447
4448 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4449 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4450}
4451
// Thin dispatch wrappers: each TFLite element-wise unary builtin maps onto the
// shared ParseElementwiseUnary implementation with the corresponding
// armnn::UnaryOperation.
void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
}

void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
}

void TfLiteParserImpl::ParseLog(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Log);
}

void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
}

void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
}

void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
}

void TfLiteParserImpl::ParseSin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sin);
}

void TfLiteParserImpl::ParseSqrt(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sqrt);
}
4491
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004492void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
4493{
4494 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4495
4496 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4497 CHECK_VALID_SIZE(inputs.size(), 1);
4498
4499 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4500 CHECK_VALID_SIZE(outputs.size(), 1);
4501
4502 std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
4503 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4504
4505 ElementwiseUnaryDescriptor desc;
4506 desc.m_Operation = unaryOperation;
4507 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
4508 ARMNN_ASSERT(layer != nullptr);
4509
Mike Kelly377fb212023-01-10 15:55:28 +00004510 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004511 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4512
4513 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4514 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4515
4516 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4517 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4518}
4519
// Thin dispatch wrappers: each TFLite comparison builtin maps onto the shared
// ParseComparison implementation with the corresponding
// armnn::ComparisonOperation.
void TfLiteParserImpl::ParseEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Equal);
}

void TfLiteParserImpl::ParseNotEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::NotEqual);
}

void TfLiteParserImpl::ParseGreater(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Greater);
}

void TfLiteParserImpl::ParseGreaterOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::GreaterOrEqual);
}

void TfLiteParserImpl::ParseLess(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Less);
}

void TfLiteParserImpl::ParseLessOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::LessOrEqual);
}
4549
// Shared implementation for the TFLite comparison builtins.
// Takes two inputs and produces one (boolean) output; the two inputs must have
// matching quantization parameters, which CheckMatchingQuantization enforces.
void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorIndex,
                                       ComparisonOperation comparisonOperation)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Layer name is "<Op>:{}:{}" formatted with the subgraph/operator indices.
    auto layerName = std::string(GetComparisonOperationAsCString(comparisonOperation)) + ":{}:{}";
    std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerNameFormatted, "Input 0", "Input 1");

    ComparisonDescriptor desc;
    desc.m_Operation = comparisonOperation;
    IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Output info is re-derived from both connected inputs (dynamic shape support).
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
4582
Mike Kelly04d82292023-01-19 18:29:40 +00004583armnn::IConnectableLayer* TfLiteParserImpl::AddReshapeLayer(armnn::IConnectableLayer* layer,
4584 unsigned int outputSlot,
4585 std::string reshapeLayerName,
4586 armnn::TensorInfo outputShape)
4587{
4588 ReshapeDescriptor desc;
4589 desc.m_TargetShape = outputShape.GetShape();
4590
4591 IConnectableLayer* reshapeLayer =
4592 m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
4593
4594 auto & prevOutputSlot = layer->GetOutputSlot(outputSlot);
4595 prevOutputSlot.Connect(reshapeLayer->GetInputSlot(0));
4596 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputShape);
4597 return reshapeLayer;
4598}
4599
// Appends the activation that a TFLite operator declares as fused to the given
// output slot of 'prevLayer'. Returns the new activation layer (named
// "<prevLayer>:<ACT>"), or 'prevLayer' unchanged when the fused activation is
// NONE. Throws ParseException for activation types the parser cannot map
// (RELU_N1_TO_1, SIGN_BIT, and anything unknown).
armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
                                                                    unsigned int outputSlot,
                                                                    tflite::ActivationFunctionType activationType)
{
    ActivationDescriptor activationDesc;
    std::string layerName = prevLayer->GetName();

    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        {
            // this is a no-op: return previous layer
            return prevLayer;
        }
        case tflite::ActivationFunctionType_RELU:
        {
            activationDesc.m_Function = ActivationFunction::ReLu;
            layerName += ":RELU";
            break;
        }
        case tflite::ActivationFunctionType_RELU6:
        {
            // RELU6 maps onto BoundedReLu clamped to [0, 6].
            activationDesc.m_Function = ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            layerName += ":RELU6";
            break;
        }
        case tflite::ActivationFunctionType_TANH:
        {
            activationDesc.m_Function = ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            layerName += ":TANH";
            break;
        }

        // I only put these here as a reminder what others we could support
        case tflite::ActivationFunctionType_RELU_N1_TO_1:
        case tflite::ActivationFunctionType_SIGN_BIT:
        default:
        {
            throw ParseException(
                fmt::format("TfLite parser doesn't support fused activation: "
                            "{}/{} {} ",
                            activationType,
                            tflite::EnumNameActivationFunctionType(activationType),
                            CHECK_LOCATION().AsString()));

        }
    }

    IConnectableLayer* activationLayer =
        m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    // Rewire: prevLayer's slot feeds the activation, which inherits its tensor info.
    auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
    prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
    return activationLayer;
}
4660
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004661armnn::IConnectableLayer* TfLiteParserImpl::AddFusedFloorLayer(armnn::IConnectableLayer* prevLayer,
4662 unsigned int outputSlot)
4663{
Teresa Charlin725728e2022-05-05 13:33:33 +01004664
4665 auto& prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
4666 DataType dataType = prevOutputSlot.GetTensorInfo().GetDataType();
4667
4668 if (dataType == DataType::Signed32)
4669 {
4670 return prevLayer;
4671 }
4672
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004673 std::string layerName = prevLayer->GetName();
4674 IConnectableLayer* floorLayer = m_Network->AddFloorLayer(layerName.c_str());
4675
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004676 prevOutputSlot.Connect(floorLayer->GetInputSlot(0));
4677 floorLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
Teresa Charlin725728e2022-05-05 13:33:33 +01004678
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004679 return floorLayer;
4680}
4681
// Reads a TFLite flatbuffer model from disk and unpacks it.
// Throws InvalidArgumentException for a null file name and
// FileNotFoundException when the path does not exist; parsing/verification
// errors are raised by LoadModelFromBinary.
TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char* fileName)
{
    if (fileName == nullptr)
    {
        throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
                                                   CHECK_LOCATION().AsString()));
    }
    std::error_code errorCode;
    fs::path pathToFile(fileName);
    if (!fs::exists(pathToFile, errorCode))
    {
        //fmt::format() could not be used here (format error)
        std::stringstream msg;
        msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
            << " " << CHECK_LOCATION().AsString();

        throw FileNotFoundException(msg.str());
    }
    // Binary mode slurp of the whole file; the flatbuffer verifier in
    // LoadModelFromBinary rejects truncated or corrupt content.
    std::ifstream file(fileName, std::ios::binary);
    std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
    return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
                               fileContent.size());
}
4705
Mike Kelly0d77ae12022-01-07 17:42:27 +00004706TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t* binaryContent, size_t len)
telsoa01c577f2c2018-08-31 09:22:23 +01004707{
4708 if (binaryContent == nullptr)
4709 {
James Ward58dec6b2020-09-11 17:32:44 +01004710 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01004711 CHECK_LOCATION().AsString()));
4712 }
4713 flatbuffers::Verifier verifier(binaryContent, len);
4714 if (verifier.VerifyBuffer<tflite::Model>() == false)
4715 {
4716 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004717 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
4718 "flatbuffers format. size:{} {}",
4719 len,
4720 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004721 }
4722 return tflite::UnPackModel(binaryContent);
4723}
4724
Mike Kelly0d77ae12022-01-07 17:42:27 +00004725TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004726 size_t subgraphIndex,
4727 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004728{
4729 CHECK_MODEL(model, subgraphIndex, operatorIndex);
4730
Mike Kelly0d77ae12022-01-07 17:42:27 +00004731 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4732 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004733
4734 size_t inputCount = operatorPtr->inputs.size();
mathad01c21025d2021-04-26 10:09:37 +01004735 TensorRawPtrVector result;
Mike Kelly0d77ae12022-01-07 17:42:27 +00004736 for (size_t i = 0; i < inputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004737 {
mathad01c21025d2021-04-26 10:09:37 +01004738 // If the input location is -1 then assume input is turned off.
4739 if (operatorPtr->inputs[i] == -1)
4740 {
4741 continue;
4742 }
4743 else
4744 {
4745 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
4746 result.push_back(subgraphPtr->tensors[inputId].get());
4747 }
telsoa01c577f2c2018-08-31 09:22:23 +01004748 }
4749 return result;
4750}
4751
Mike Kelly0d77ae12022-01-07 17:42:27 +00004752TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004753 size_t subgraphIndex,
4754 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004755{
4756 CHECK_MODEL(model, subgraphIndex, operatorIndex);
4757
Mike Kelly0d77ae12022-01-07 17:42:27 +00004758 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4759 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004760
4761 size_t outputCount = operatorPtr->outputs.size();
4762 TensorRawPtrVector result(outputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004763 for (size_t i = 0; i < outputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004764 {
4765 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
4766 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01004767 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01004768 }
4769 return result;
4770}
4771
Mike Kelly0d77ae12022-01-07 17:42:27 +00004772TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004773 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004774{
4775 CHECK_SUBGRAPH(model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004776 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004777
Derek Lambertiff05cc52019-04-26 13:05:17 +01004778 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01004779 TensorIdRawPtrVector result(inputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004780 for (size_t i = 0; i < inputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004781 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01004782 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01004783 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01004784 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01004785 }
4786 return result;
4787}
4788
Mike Kelly0d77ae12022-01-07 17:42:27 +00004789TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004790 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004791{
4792 CHECK_SUBGRAPH(model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004793 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004794
Derek Lambertiff05cc52019-04-26 13:05:17 +01004795 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01004796 TensorIdRawPtrVector result(outputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004797 for (size_t i = 0; i < outputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004798 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01004799 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
4800 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01004801 }
4802 return result;
4803}
4804
// Returns a mutable reference to the operator's raw input tensor ids, exactly as
// stored in the flatbuffer model (-1 entries denote disabled optional inputs).
// The reference aliases the model's own storage; it stays valid while the model lives.
std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
                                                          size_t subgraphIndex,
                                                          size_t operatorIndex)
{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto& subgraphPtr = model->subgraphs[subgraphIndex];
    const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->inputs;
}
4814
// Returns a mutable reference to the operator's raw output tensor ids, exactly as
// stored in the flatbuffer model. The reference aliases the model's own storage;
// it stays valid while the model lives.
std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
                                                           size_t subgraphIndex,
                                                           size_t operatorIndex)
{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto& subgraphPtr = model->subgraphs[subgraphIndex];
    const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->outputs;
}
4824
Kevin May7d96b162021-02-03 17:38:41 +00004825void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
4826 size_t operatorIndex,
4827 IConnectableLayer* layer,
Finn Williamsd4fa5452021-03-01 12:31:41 +00004828 const std::vector<unsigned int>& tensorIndexes,
4829 unsigned int startingSlotIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004830{
4831 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01004832 ARMNN_ASSERT(layer != nullptr);
Matthew Sloyan81beae32021-07-13 19:46:11 +01004833
Finn Williamsd4fa5452021-03-01 12:31:41 +00004834 if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
telsoa01c577f2c2018-08-31 09:22:23 +01004835 {
4836 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004837 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
4838 " for subgraph:{} operator index:{} {}",
4839 tensorIndexes.size(),
4840 layer->GetNumInputSlots(),
4841 subgraphIndex,
4842 operatorIndex,
4843 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004844 }
4845
Finn Williamsd4fa5452021-03-01 12:31:41 +00004846 for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
telsoa01c577f2c2018-08-31 09:22:23 +01004847 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00004848 unsigned int tensorIndex = tensorIndexes[index];
4849 armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
telsoa01c577f2c2018-08-31 09:22:23 +01004850 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
4851 }
4852}
4853
Kevin May7d96b162021-02-03 17:38:41 +00004854void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
4855 size_t operatorIndex,
4856 IConnectableLayer* layer,
4857 const std::vector<unsigned int>& tensorIndexes)
telsoa01c577f2c2018-08-31 09:22:23 +01004858{
4859 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01004860 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01004861 if (tensorIndexes.size() != layer->GetNumOutputSlots())
4862 {
4863 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004864 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
4865 " for subgraph:{} operator index:{} {}",
4866 tensorIndexes.size(),
4867 layer->GetNumOutputSlots(),
4868 subgraphIndex,
4869 operatorIndex,
4870 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004871 }
4872
4873 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
4874 {
4875 unsigned int tensorIndex = tensorIndexes[slotIndex];
4876 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
4877 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
4878 }
4879}
4880
Mike Kelly377fb212023-01-10 15:55:28 +00004881void TfLiteParserImpl::SetupInputLayerTensorInfos(size_t subgraphIndex)
4882{
4883 CHECK_SUBGRAPH(m_Model, subgraphIndex);
4884
4885 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
4886 for (auto const& tensorIdAndPtr : inputs)
4887 {
4888 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
4889 m_TensorInfos.insert({tensorIdAndPtr.first, tensorInfo});
4890 }
4891}
4892
Kevin May7d96b162021-02-03 17:38:41 +00004893void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004894{
4895 CHECK_SUBGRAPH(m_Model, subgraphIndex);
4896
4897 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004898 for (auto const& tensorIdAndPtr : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01004899 {
4900 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
4901 IConnectableLayer* layer =
4902 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
4903
4904 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
4905 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
4906
4907 RegisterOutputSlots(subgraphIndex,
4908 VIRTUAL_OPERATOR_ID,
4909 layer,
4910 { static_cast<uint32_t>(tensorIdAndPtr.first) });
4911 }
4912}
4913
Kevin May7d96b162021-02-03 17:38:41 +00004914void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004915{
4916 CHECK_SUBGRAPH(m_Model, subgraphIndex);
4917
4918 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004919 for (auto const& tensorIdAndPtr : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01004920 {
4921 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
4922 IConnectableLayer* layer =
4923 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
4924
4925 RegisterInputSlots(subgraphIndex,
4926 VIRTUAL_OPERATOR_ID,
4927 layer,
4928 { static_cast<uint32_t>(tensorIdAndPtr.first) });
4929 }
4930}
4931
// Caches a TensorInfo for every tensor that has consumers but no registered
// producer (i.e. tensors expected to be constants), so later shape inference
// can look them up in m_TensorInfos.
// NOTE(review): the outer loop iterates subgraphIndex over m_SubgraphConnections
// while tensors are fetched from the subgraph selected by the 'subgraph'
// parameter — these indices are mixed; verify this is intentional (same pattern
// exists in SetupConstantLayers).
void TfLiteParserImpl::SetupConstantLayerTensorInfos(size_t subgraph)
{
    CHECK_SUBGRAPH(m_Model, subgraph);

    const auto & subgraphPtr = m_Model->subgraphs[subgraph];
    for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // No producer but at least one consumer: tensor must come from a constant.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
                m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
            {
                TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();

                armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);

                m_TensorInfos.insert({tensorIndex, tensorInfo});
            }
        }
    }
}
4953
// Materialises a ConstantLayer for every tensor that is consumed but has no
// producing layer. Three cases:
//  1) the tensor's buffer holds data          -> constant layer over that data,
//  2) the tensor was flagged for creation     -> constant layer over a zero-filled
//     buffer of the right size (m_ConstantsToBeCreated),
//  3) otherwise the model is malformed        -> ParseException.
// Tensors whose buffer was flagged in m_ConstantsToDequantize are promoted to
// Float32 before the layer is created.
// NOTE(review): the outer loop iterates subgraphIndex over m_SubgraphConnections
// while tensors come from the 'subgraph' parameter — verify the index mixing is
// intentional (same pattern as SetupConstantLayerTensorInfos).
void TfLiteParserImpl::SetupConstantLayers(size_t subgraph)
{
    CHECK_SUBGRAPH(m_Model, subgraph);

    const auto & subgraphPtr = m_Model->subgraphs[subgraph];
    for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // No producer but at least one consumer: tensor must come from a constant.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
                m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
            {
                TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();

                if (IsConstTensor(tensorPtr))
                {
                    armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
                    armnn::DataType dataType = tensorInfo.GetDataType();

                    // Promote to Float32 when this buffer was marked for dequantization.
                    if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
                        != m_ConstantsToDequantize.end())
                    {
                        dataType = DataType::Float32;
                    }
                    auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo, dataType);

                    std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
                    IConnectableLayer *layer = m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

                    layer->GetOutputSlot(0).SetTensorInfo(tensorAndData.first.GetInfo());
                    RegisterOutputSlots(subgraphIndex,
                                        VIRTUAL_OPERATOR_ID,
                                        layer,
                                        { tensorIndex });
                }
                else if (ShouldConstantTensorBeCreated(tensorIndex))
                {
                    armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
                    armnn::DataType dataType = tensorInfo.GetDataType();

                    // Promote to Float32 when this buffer was marked for dequantization.
                    if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
                        != m_ConstantsToDequantize.end())
                    {
                        dataType = DataType::Float32;
                    }
                    // Make sure isConstant flag is set.
                    tensorInfo.SetConstant();
                    tensorInfo.SetDataType(dataType);

                    // Zero-filled backing buffer sized to the tensor.
                    // NOTE(review): the vector is a temporary — this relies on
                    // AddConstantLayer copying the data before the statement ends;
                    // confirm against INetwork::AddConstantLayer semantics.
                    auto tensorAndData = ConstTensor(tensorInfo, std::vector<uint8_t>(tensorInfo.GetNumBytes()));

                    std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
                    IConnectableLayer* layer = m_Network->AddConstantLayer(tensorAndData, layerName.c_str());

                    layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
                    RegisterOutputSlots(subgraphIndex,
                                        VIRTUAL_OPERATOR_ID,
                                        layer,
                                        {tensorIndex});
                }
                else
                {
                    throw ParseException(
                        fmt::format("Invalid Tensor: Tensor should be constant. {}",
                                    CHECK_LOCATION().AsString()));
                }
            }
        }
    }
}
5024
// Returns the raw buffer backing the given buffer index, after bounds-checking
// the index against the model's buffer table.
// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
{
    CHECK_BUFFER(model, bufferIndex);
    return model->buffers[bufferIndex].get();
}
5031
// Builds a ConstTensor of element type T from the buffer (optionally permuted),
// and wraps the freshly-allocated backing memory in a SupportedDataStorage so the
// caller can keep it alive for as long as the ConstTensor is referenced.
template<typename T>
std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
                                                TfLiteParserImpl::TensorRawPtr tensorPtr,
                                                armnn::TensorInfo& tensorInfo,
                                                armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    auto constData = CreateConstTensorImpl<T>(bufferPtr,
                                              tensorPtr,
                                              tensorInfo,
                                              permutationVector);
    // Hand ownership of the typed buffer to the storage object returned to the caller.
    TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
    return std::make_pair(constData.first, std::move(storage));
}
5049
Mike Kelly5880b912022-01-28 16:18:54 +00005050bool TfLiteParserImpl::ShouldConstantTensorBeCreated(unsigned int tensorIndex)
5051{
5052 // If the TensorIndex appears in the list of ConstantsToBeCreated then return true
5053 return (std::find(m_ConstantsToBeCreated.begin(), m_ConstantsToBeCreated.end(), tensorIndex)
5054 != m_ConstantsToBeCreated.end());
5055}
5056
Finn Williamsd4fa5452021-03-01 12:31:41 +00005057bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
5058{
5059 CHECK_TENSOR_PTR(tensorPtr);
mathad01bf7edb62021-04-20 16:12:45 +01005060 bool isConst = true;
5061
5062 auto buffer = GetBuffer(m_Model, tensorPtr->buffer);
5063 if (buffer->data.size() == 0)
5064 {
5065 isConst = false;
5066 }
5067
5068 return isConst;
Finn Williamsd4fa5452021-03-01 12:31:41 +00005069}
5070
// Creates a ConstTensor (plus owning storage) from the tensor's buffer, copying
// and optionally permuting the data according to permutationVector. Dispatches on
// the tensor's data type to pick the element type of the copy.
// @throws ParseException for data types with no supported copy path.
std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
                                            armnn::TensorInfo& tensorInfo,
                                            armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    CHECK_TENSOR_PTR(tensorPtr);
    auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
    CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    switch (tensorInfo.GetDataType())
    {
        case armnn::DataType::Float32:
            return CreateConstTensorAndStoreData<float>(bufferPtr,
                                                        tensorPtr,
                                                        tensorInfo,
                                                        permutationVector);
        case armnn::DataType::QAsymmU8:
            return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        case armnn::DataType::QSymmS8:
            return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
                                                         tensorPtr,
                                                         tensorInfo,
                                                         permutationVector);
        // QAsymmS8 shares int8_t storage with QSymmS8.
        case armnn::DataType::QAsymmS8:
            return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
                                                         tensorPtr,
                                                         tensorInfo,
                                                         permutationVector);
        case armnn::DataType::Signed32:
            return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        default:
        {
            std::stringstream errString;
            errString << "Unexpected datatype when creating const tensor: "
                        << armnn::GetDataTypeName(tensorInfo.GetDataType())
                        << " shape:" << tensorInfo.GetShape()
                        << CHECK_LOCATION().AsString();
            throw ParseException(errString.str());
        }
    }
}
5121
Finn Williamsd4fa5452021-03-01 12:31:41 +00005122armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5123 armnn::TensorInfo& tensorInfo)
5124{
5125 CHECK_TENSOR_PTR(tensorPtr);
5126 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5127 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5128
Matthew Sloyan81beae32021-07-13 19:46:11 +01005129 // Make sure isConstant flag is set.
5130 tensorInfo.SetConstant();
5131
Finn Williamsd4fa5452021-03-01 12:31:41 +00005132 return ConstTensor(tensorInfo, bufferPtr->data.data());
5133}
5134
Mike Kelly5880b912022-01-28 16:18:54 +00005135std::pair<armnn::ConstTensor, std::unique_ptr<float[]>>
5136TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5137 armnn::TensorInfo& tensorInfo,
5138 armnn::DataType inputDataType)
5139{
5140 CHECK_TENSOR_PTR(tensorPtr);
5141 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5142 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5143
5144 // Make sure isConstant flag is set.
5145 tensorInfo.SetConstant();
5146
Mike Kelly0506ef02023-01-03 16:29:44 +00005147 if (inputDataType == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
Mike Kelly5880b912022-01-28 16:18:54 +00005148 {
Mike Kelly0506ef02023-01-03 16:29:44 +00005149 try
5150 {
5151 TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5152 std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5153 return std::make_pair(ConstTensor(constTensorInfo, data.get()), std::move(data));
5154 }
Cathal Corbett9c843c32023-01-09 17:51:37 +00005155 catch (InvalidArgumentException&)
Mike Kelly0506ef02023-01-03 16:29:44 +00005156 {
5157 throw ParseException(
5158 fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5159 GetDataTypeName(DataType::Float32),
5160 GetDataTypeName(tensorInfo.GetDataType()),
5161 CHECK_LOCATION().AsString()));
5162 }
Mike Kelly5880b912022-01-28 16:18:54 +00005163 }
5164 else
5165 {
5166 return std::make_pair(ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5167 }
5168}
5169
5170std::pair<armnn::ConstTensor*, std::unique_ptr<float[]>>
5171TfLiteParserImpl::CreateConstTensorPtr(TensorRawPtr tensorPtr, armnn::TensorInfo& inputTensorInfo)
5172{
5173 CHECK_TENSOR_PTR(tensorPtr);
5174 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5175 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5176 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5177
5178 // Make sure isConstant flag is set.
5179 tensorInfo.SetConstant();
5180
5181 if (inputTensorInfo.GetDataType() == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
5182 {
Mike Kelly0506ef02023-01-03 16:29:44 +00005183 try
5184 {
5185 TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5186 std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5187 return std::make_pair(new ConstTensor(constTensorInfo, data.get()), std::move(data));
5188 }
Cathal Corbett9c843c32023-01-09 17:51:37 +00005189 catch (InvalidArgumentException&)
Mike Kelly0506ef02023-01-03 16:29:44 +00005190 {
5191 throw ParseException(
5192 fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5193 GetDataTypeName(DataType::Float32),
5194 GetDataTypeName(tensorInfo.GetDataType()),
5195 CHECK_LOCATION().AsString()));
5196 }
Mike Kelly5880b912022-01-28 16:18:54 +00005197 }
5198 else
5199 {
5200 return std::make_pair(new ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5201 }
5202}
5203
Kevin May7d96b162021-02-03 17:38:41 +00005204BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
5205 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01005206{
5207 CHECK_SUBGRAPH(m_Model, subgraphId);
5208 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005209 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005210 {
5211 if (input.second->name == name)
5212 {
5213 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
Colm Donelan4bc993b2021-11-09 20:39:10 +00005214 auto inputTensorInfo = ToTensorInfo(input.second);
5215 // Input tensors are always treated as constant tensors during network execution.
5216 inputTensorInfo.SetConstant(true);
5217 return std::make_pair(bindingId, inputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01005218 }
5219 }
5220
5221 std::stringstream bindings;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005222 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005223 {
5224 bindings << "'" << input.second->name << "' ";
5225 }
5226
5227 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005228 fmt::format("No input binding found for subgraph:{} and name:{}. "
5229 "Possible inputs are: [{}] {}",
5230 subgraphId,
5231 name,
5232 bindings.str(),
5233 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005234}
5235
Kevin May7d96b162021-02-03 17:38:41 +00005236BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
5237 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01005238{
5239 CHECK_SUBGRAPH(m_Model, subgraphId);
5240 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005241 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005242 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005243 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01005244 if (output.second->name == name)
5245 {
5246 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Mike Kelly377fb212023-01-10 15:55:28 +00005247 std::vector<unsigned int> shape = m_OverriddenOutputShapes.size() > 0 ?
5248 m_OverriddenOutputShapes[i] : AsUnsignedVector(output.second->shape);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005249 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01005250 }
5251 }
5252
5253 std::stringstream bindings;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005254 for (auto const& output : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005255 {
5256 bindings << "'" << output.second->name << "' ";
5257 }
5258
5259 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005260 fmt::format("No output binding found for subgraph:{} and name:{}. "
5261 "Possible outputs are: [{}] {}",
5262 subgraphId,
5263 name,
5264 bindings.str(),
5265 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005266}
5267
// Returns the number of subgraphs in the loaded model.
size_t TfLiteParserImpl::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}
5272
Kevin May7d96b162021-02-03 17:38:41 +00005273std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01005274{
5275 CHECK_SUBGRAPH(m_Model, subgraphId);
5276 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
5277 std::vector<std::string> result;
5278 result.reserve(inputs.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +00005279 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005280 {
5281 result.push_back(input.second->name);
5282 }
5283 return result;
5284}
5285
Kevin May7d96b162021-02-03 17:38:41 +00005286std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01005287{
5288 CHECK_SUBGRAPH(m_Model, subgraphId);
5289 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
5290 std::vector<std::string> result;
5291 result.reserve(outputs.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +00005292 for (auto const& output : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005293 {
5294 result.push_back(output.second->name);
5295 }
5296 return result;
5297}
5298
// Returns the parser's version string, baked in from armnnTfLiteParser/Version.hpp
// at build time.
const std::string TfLiteParserImpl::GetVersion()
{
    return TFLITE_PARSER_VERSION;
}
5303
// SupportedDataStorage keeps the typed heap buffer backing a ConstTensor alive.
// Exactly one of the four members is non-null, matching the element type the
// tensor data was copied into; the others are explicitly nulled.

// Takes ownership of a float buffer.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]>&& data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}

// Takes ownership of a uint8 buffer.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}

// Takes ownership of an int8 buffer.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}

// Takes ownership of an int32 buffer.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
5335
5336} // armnnTfLiteParser