blob: 9e8af66b49da4e3949f729688ed360b3b609a2a7 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kelly04d82292023-01-19 18:29:40 +00002// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfLiteParser/Version.hpp"
Mike Kelly5880b912022-01-28 16:18:54 +00009#include "armnn/LstmParams.hpp"
Matthew Sloyanac001ee2021-02-03 10:43:04 +000010
Sadik Armagand109a4d2020-07-28 10:42:13 +010011#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +000012#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010013#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000014#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010015#include <armnn/Tensor.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000016#include <armnnUtils/TensorUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010017#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010018#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000019#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010020#include <armnn/utility/NumericCast.hpp>
Mike Kelly377fb212023-01-10 15:55:28 +000021#include <armnn/LayerSupport.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010022
23// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000024#include <armnnUtils/Permute.hpp>
Rob Hughes9542f902021-07-14 09:48:54 +010025#include <armnnUtils/Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000026
Sadik Armagan479045b2018-10-01 11:51:37 +010027#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010028#include <VerificationHelpers.hpp>
29
30// The generated code based on the Tf Lite schema:
31#include <schema_generated.h>
32
Matteo Martincighe011d202019-11-28 11:35:47 +000033#include <flatbuffers/flexbuffers.h>
34
James Ward58dec6b2020-09-11 17:32:44 +010035#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010036
telsoa01c577f2c2018-08-31 09:22:23 +010037#include <algorithm>
Matthew Sloyanac001ee2021-02-03 10:43:04 +000038#include <iostream>
telsoa01c577f2c2018-08-31 09:22:23 +010039#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010040#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000041
// Throws an armnn::ParseException built from a streamable message expression,
// with the call-site location (CHECK_LOCATION) appended after ": ".
// 'msg' may use operator<< chaining, e.g. ARMNN_THROW_PARSE_EXCEPTION("bad op " << idx).
#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
    { \
        throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
                                                                            << ": " \
                                                                            << CHECK_LOCATION().AsString()).str()); \
    }
telsoa01c577f2c2018-08-31 09:22:23 +010048
49using namespace armnn;
50using armnn::CheckLocation;
51namespace armnnTfLiteParser
52{
Kevin May7d96b162021-02-03 17:38:41 +000053
54ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
55 pTfLiteParserImpl(new TfLiteParserImpl(options)) {}
56
57ITfLiteParser::~ITfLiteParser() = default;
58
59ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
60{
61 return new ITfLiteParser(options);
62}
63
64ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
65{
66 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
67}
68
69void ITfLiteParser::Destroy(ITfLiteParser* parser)
70{
71 delete parser;
72}
73
74armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
75{
76 return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
77}
78
Mike Kelly0d77ae12022-01-07 17:42:27 +000079armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
Kevin May7d96b162021-02-03 17:38:41 +000080{
81 return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
82}
83
84BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
85 const std::string& name) const
86{
87 return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
88}
89
90BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
91 const std::string& name) const
92{
93 return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
94}
95
96size_t ITfLiteParser::GetSubgraphCount() const
97{
98 return pTfLiteParserImpl->GetSubgraphCount();
99}
100
101std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
102{
103 return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
104}
105
106std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
107{
108 return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
109}
110
telsoa01c577f2c2018-08-31 09:22:23 +0100111namespace
112{
jimfly01c25411c2018-11-14 17:47:22 +0000113
telsoa01c577f2c2018-08-31 09:22:23 +0100114const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
115
Mike Kelly0d77ae12022-01-07 17:42:27 +0000116void CheckSubgraph(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100117 size_t subgraphIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000118 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100119{
120 if (model.get() == nullptr)
121 {
122 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100123 fmt::format("{} was called with invalid (null) model. "
124 "Possible reason is that the model is not yet loaded and Unpack(ed). "
125 "subgraph:{} at {}",
126 location.m_Function,
127 subgraphIndex,
128 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100129 }
130 else if (subgraphIndex >= model->subgraphs.size())
131 {
132 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100133 fmt::format("{} was called with an invalid subgraph index. "
134 "subgraph:{} at {}",
135 location.m_Function,
136 subgraphIndex,
137 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100138 }
139}
140
141#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
142 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
143
Mike Kelly0d77ae12022-01-07 17:42:27 +0000144void CheckModel(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100145 size_t subgraphIndex,
146 size_t operatorIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000147 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100148{
149 if (model.get() == nullptr)
150 {
151 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100152 fmt::format("{} was called with invalid (null) model. "
153 "Possible reason is that the model is not yet loaded and Unpack(ed). "
154 "subgraph:{} operator:{} at {}",
155 location.m_Function,
156 subgraphIndex,
157 operatorIndex,
158 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100159 }
160 else if (subgraphIndex >= model->subgraphs.size())
161 {
162 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100163 fmt::format("{} was called with an invalid subgraph index. "
164 "subgraph:{} operator:{} at {}",
165 location.m_Function,
166 subgraphIndex,
167 operatorIndex,
168 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100169 }
170 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
171 operatorIndex != VIRTUAL_OPERATOR_ID)
172 {
173 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100174 fmt::format("{} was called with an invalid operator index. "
175 "subgraph:{} operator:{} at {}",
176 location.m_Function,
177 subgraphIndex,
178 operatorIndex,
179 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100180 }
181}
182
183#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
184 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
185
Mike Kelly0d77ae12022-01-07 17:42:27 +0000186void CheckTensor(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100187 size_t subgraphIndex,
188 size_t tensorIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000189 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100190{
191 // not checking model, because I assume CHECK_MODEL already run
192 // and checked that. An assert would do.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100193 ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
telsoa01c577f2c2018-08-31 09:22:23 +0100194
195 // also subgraph index should be checked by CHECK_MODEL so
196 // I only add an assert here
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100197 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
telsoa01c577f2c2018-08-31 09:22:23 +0100198
199 // the tensor index is the only one to check here
200 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
201 {
202 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100203 fmt::format("{} was called with an invalid tensor index. "
204 "subgraph:{} tensor:{} at {}",
205 location.m_Function,
206 subgraphIndex,
207 tensorIndex,
208 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100209 }
210}
211
212#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
213 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
214
Kevin May7d96b162021-02-03 17:38:41 +0000215void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000216 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100217{
218 if (rawPtr == nullptr)
219 {
220 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100221 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100222 }
223}
224
225#define CHECK_TENSOR_PTR(TENSOR_PTR) \
226 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
227
Mike Kelly0d77ae12022-01-07 17:42:27 +0000228void CheckBuffer(const TfLiteParserImpl::ModelPtr& model,
telsoa01c577f2c2018-08-31 09:22:23 +0100229 size_t bufferIndex,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000230 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100231{
232 if (model.get() == nullptr)
233 {
234 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100235 fmt::format("{} was called with invalid (null) model. "
236 "Possible reason is that the model is not yet loaded and Unpack(ed). "
237 "buffer:{} at {}",
238 location.m_Function,
239 bufferIndex,
240 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100241 }
242 else if (bufferIndex >= model->buffers.size())
243 {
244 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100245 fmt::format("{} was called with an invalid buffer index. "
246 "buffer index:{} at {}",
247 location.m_Function,
248 bufferIndex,
249 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100250 }
251 else if (model->buffers[bufferIndex].get() == nullptr)
252 {
253 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100254 fmt::format("The buffer #{} is null. {}",
255 bufferIndex,
256 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100257 }
258}
259
260#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
261 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
262
Kevin May7d96b162021-02-03 17:38:41 +0000263void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000264 const armnn::TensorInfo& tensorInfo,
telsoa01c577f2c2018-08-31 09:22:23 +0100265 uint32_t bufferId,
Mike Kelly0d77ae12022-01-07 17:42:27 +0000266 const CheckLocation& location)
telsoa01c577f2c2018-08-31 09:22:23 +0100267{
268 if (bufferPtr == nullptr)
269 {
270 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100271 fmt::format("BufferPtr is null for buffer:{}. {}",
272 bufferId,
273 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100274 }
275 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
276 tensorInfo.GetNumBytes() > bufferPtr->data.size())
277 {
278 std::stringstream ss;
279 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
280 << "For tensor: " << tensorInfo.GetShape()
281 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
282 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
283 throw ParseException(ss.str());
284 }
285}
286
Mike Kelly0d77ae12022-01-07 17:42:27 +0000287
288tflite::BuiltinOperator GetOpCode(const TfLiteParserImpl::ModelPtr& model, size_t subgraphIndex, size_t operatorIndex)
289{
290 const auto& operatorPtr = model->subgraphs[subgraphIndex]->operators[operatorIndex];
291 auto opcodeIndex = operatorPtr->opcode_index;
292
293// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
294#if defined(ARMNN_POST_TFLITE_2_3)
295 auto opcode = std::max(model->operator_codes[opcodeIndex]->builtin_code,
296 static_cast<tflite::BuiltinOperator>(model->operator_codes[opcodeIndex]->deprecated_builtin_code));
297#else
298 auto opcode = model->operator_codes[opcodeIndex]->builtin_code;
299#endif
300 return opcode;
301}
302
303std::vector<unsigned int> GetUIntBuffer(armnn::TensorInfo info,
304 const TfLiteParserImpl::ModelPtr& model,
305 size_t bufferIndex)
306{
307 TfLiteParserImpl::BufferRawPtr bufferPtr = TfLiteParserImpl::GetBuffer(model, bufferIndex);
308 std::vector<unsigned int> buffer(info.GetNumElements());
309
310 if (info.GetDataType() == DataType::Signed32)
311 {
312 ::memcpy(buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
313 }
314 else if (info.GetDataType() == DataType::Signed64)
315 {
316 std::vector<uint64_t> uint64Buffer(info.GetNumElements());
317 ::memcpy(uint64Buffer.data(), bufferPtr->data.data(), bufferPtr->data.size());
318 buffer.assign(std::begin(uint64Buffer), std::end(uint64Buffer));
319 }
Mike Kelly0506ef02023-01-03 16:29:44 +0000320 else
321 {
322 CheckLocation location = CHECK_LOCATION();
323 throw ParseException(
324 fmt::format("Unsupported data type for uint buffer {}, only Signed 32 or Signed 64 are supported. {}",
325 GetDataTypeName(info.GetDataType()),
326 location.AsString()));
327 }
Mike Kelly0d77ae12022-01-07 17:42:27 +0000328 return buffer;
329}
330
// Convenience wrapper for CheckBufferSize that captures the call site.
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
333
334bool IsActivationSupported(tflite::ActivationFunctionType activationType)
335{
336 switch(activationType)
337 {
338 case tflite::ActivationFunctionType_NONE:
339 case tflite::ActivationFunctionType_RELU:
340 case tflite::ActivationFunctionType_RELU6:
341 case tflite::ActivationFunctionType_TANH:
342 {
343 return true;
344 }
345 default:
346 {
347 return false;
348 }
349 }
350}
351
352#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
353 do { \
354 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
355 { \
356 throw ParseException( \
Mike Kelly377fb212023-01-10 15:55:28 +0000357 fmt::format("TfLite parser doesn't support fused activation: " \
James Ward58dec6b2020-09-11 17:32:44 +0100358 "{}/{} in {} subgraph:{} operator:{} at {}", \
359 OPTION->fused_activation_function, \
360 tflite::EnumNameActivationFunctionType(\
361 OPTION->fused_activation_function), \
362 __func__, \
363 SUBGRAPH_INDEX, \
364 OPERATOR_INDEX, \
365 CHECK_LOCATION().FileLine())); \
telsoa01c577f2c2018-08-31 09:22:23 +0100366 } \
367 } while(false)
368
369
Mike Kelly0d77ae12022-01-07 17:42:27 +0000370std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t>& in)
telsoa01c577f2c2018-08-31 09:22:23 +0100371{
372 std::vector<unsigned int> result;
373 result.reserve(in.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +0000374 for (auto& i : in)
telsoa01c577f2c2018-08-31 09:22:23 +0100375 {
mathad01c21025d2021-04-26 10:09:37 +0100376 // If the location of the input data is -1 then the input should be ignored.
377 if (i == -1)
378 {
379 continue;
380 }
telsoa01c577f2c2018-08-31 09:22:23 +0100381 result.push_back(CHECKED_NON_NEGATIVE(i));
382 }
383 return result;
384}
385
// A negative operand index marks an omitted optional input in the TfLite
// operator's tensor list; non-negative indices refer to real tensors.
bool IsOptionalOperandPresent(int input)
{
    return input > -1;
}
390
telsoa01c577f2c2018-08-31 09:22:23 +0100391void CalcPadding(uint32_t inputSize,
392 uint32_t filterSize,
393 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100394 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100395 uint32_t& paddingFront,
396 uint32_t& paddingBack,
397 tflite::Padding padding)
398{
399 paddingFront = 0;
400 paddingBack = 0;
401 if (padding == tflite::Padding_SAME)
402 {
403 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100404 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
405 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100406 if (temp > inputSize)
407 {
408 paddingFront = (temp - inputSize) / 2;
409 paddingBack = (temp - inputSize) - paddingFront;
410 }
411 }
412}
413
Kevin May7d96b162021-02-03 17:38:41 +0000414armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Finn Williamsb49ed182021-06-29 15:50:08 +0100415 const std::vector<unsigned int>& shape,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100416 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100417{
418 armnn::DataType type;
419 CHECK_TENSOR_PTR(tensorPtr);
420
421 switch (tensorPtr->type)
422 {
423 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000424 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100425 break;
426 case tflite::TensorType_FLOAT32:
427 type = armnn::DataType::Float32;
428 break;
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100429 case tflite::TensorType_FLOAT16:
430 type = armnn::DataType::Float16;
431 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000432 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000433 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000434 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000435 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000436 type = armnn::DataType::QAsymmS8;
437 }
438 else
439 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000440 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000441 type = armnn::DataType::QSymmS8;
442 }
Finn Williamsed66d142019-12-06 09:55:55 +0000443 break;
444 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000445 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000446 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100447 case tflite::TensorType_INT32:
448 type = armnn::DataType::Signed32;
449 break;
Inki Daed4619e22020-09-10 15:33:54 +0900450 case tflite::TensorType_INT64:
451 type = armnn::DataType::Signed64;
452 break;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100453 case tflite::TensorType_BOOL:
454 type = armnn::DataType::Boolean;
455 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100456 default:
457 {
458 CheckLocation location = CHECK_LOCATION();
459 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100460 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
461 tensorPtr->type,
462 tflite::EnumNameTensorType(tensorPtr->type),
463 tensorPtr->name,
464 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100465 }
466 }
Finn Williamsb49ed182021-06-29 15:50:08 +0100467 TensorShape tensorShape;
468
469 std::vector<unsigned int> safeShape = shape;
470 if (shape.size() == 0)
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100471 {
472 safeShape.push_back(1);
Finn Williamsb49ed182021-06-29 15:50:08 +0100473 }
474
475 if (!outputTensor)
476 {
477 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()), safeShape.data());
478 }
479 else
480 {
Rob Hughesd812a312021-08-06 13:10:53 +0100481 size_t shapeSignatureSize = tensorPtr->shape_signature.size();
Finn Williamsb49ed182021-06-29 15:50:08 +0100482
483 // If a shape signature exists we will use that to infer dynamic tensors
484 if (shapeSignatureSize != 0)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100485 {
Finn Williamsb49ed182021-06-29 15:50:08 +0100486 // If the shape is incompatible with the shape signature override the shape
487 if (shapeSignatureSize != shape.size())
488 {
489 safeShape = {};
490
491 for (unsigned int i = 0; i < shapeSignatureSize; ++i)
492 {
493 unsigned int dim = tensorPtr->shape_signature[i] > -1 ?
494 static_cast<unsigned int>(tensorPtr->shape_signature[i]) : 0;
495 safeShape.push_back(dim);
496 }
497 }
498
Rob Hughesd812a312021-08-06 13:10:53 +0100499 std::unique_ptr<bool[]> dimMask = std::make_unique<bool[]>(tensorPtr->shape_signature.size());
Mike Kelly04d82292023-01-19 18:29:40 +0000500 bool batchOnly = true;
Finn Williamsb49ed182021-06-29 15:50:08 +0100501 for (unsigned int i = 0; i < tensorPtr->shape_signature.size(); ++i)
502 {
Mike Kelly04d82292023-01-19 18:29:40 +0000503 dimMask[i] = tensorPtr->shape_signature[i] != -1;
504
505 if (i > 0 && !dimMask[i])
506 {
507 batchOnly = false;
508 }
509 }
510 if (batchOnly)
511 {
512 dimMask[0] = true;
Finn Williamsb49ed182021-06-29 15:50:08 +0100513 }
Rob Hughesd812a312021-08-06 13:10:53 +0100514 tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask.get());
Finn Williamsb49ed182021-06-29 15:50:08 +0100515 }
516 // If there is no shape signature treat the tensor as dynamic if the shape has a size of zero
517 else if (shape.size() == 0)
518 {
519 tensorShape = TensorShape(1, false);
520 }
521 else
522 {
523 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(shape.size()), shape.data());
Sadik Armagand109a4d2020-07-28 10:42:13 +0100524 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100525 }
526
Keith Davisd305e1a2020-01-22 11:57:54 +0000527 float quantizationScale = 0.0f;
528 int32_t quantizationOffset = 0;
529
530 if (tensorPtr->quantization.get())
531 {
532 if (tensorPtr->quantization->scale.size() <= 1)
533 {
534 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
535 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
536
537 if (tensorPtr->quantization->scale.size() == 1)
538 {
539 quantizationScale = tensorPtr->quantization->scale[0];
540 }
541 if (tensorPtr->quantization->zero_point.size() == 1)
542 {
543 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000544 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100545 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000546 }
547
Sadik Armagand109a4d2020-07-28 10:42:13 +0100548 armnn::TensorInfo result(tensorShape,
549 type,
550 quantizationScale,
551 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000552 return result;
553 }
554 else
555 {
556 std::vector<float> quantizationScales;
557 std::vector<int32_t> quantizationOffsets;
558
559 // Scale
560 std::copy(tensorPtr->quantization->scale.begin(),
561 tensorPtr->quantization->scale.end(),
562 std::back_inserter(quantizationScales));
563
Keith Davis0c2eeac2020-02-11 16:51:50 +0000564 // QSymmS8 Per-axis
Sadik Armagand109a4d2020-07-28 10:42:13 +0100565 armnn::TensorInfo result(tensorShape,
566 type,
567 quantizationScales,
Jan Eilers7612bd62021-04-06 17:29:03 +0100568 armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
Keith Davisd305e1a2020-01-22 11:57:54 +0000569 return result;
570 }
571 }
572 else
573 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100574 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000575 type,
576 quantizationScale,
577 quantizationOffset);
578 return result;
579 }
telsoa01c577f2c2018-08-31 09:22:23 +0100580}
581
Kevin May7d96b162021-02-03 17:38:41 +0000582armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Mike Kelly377fb212023-01-10 15:55:28 +0000583 const bool outputTensor = false)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100584{
Mike Kelly0d77ae12022-01-07 17:42:27 +0000585 auto const& dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100586 return ToTensorInfo(tensorPtr, dimensions, outputTensor);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100587}
588
telsoa01c577f2c2018-08-31 09:22:23 +0100589template<typename T>
590std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
Kevin May7d96b162021-02-03 17:38:41 +0000591CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
592 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000593 armnn::TensorInfo& tensorInfo,
594 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100595{
Jan Eilers8eb25602020-03-09 12:13:48 +0000596 IgnoreUnused(tensorPtr);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100597 ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
598 ARMNN_ASSERT_MSG(bufferPtr != nullptr,
James Ward58dec6b2020-09-11 17:32:44 +0100599 fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
telsoa01c577f2c2018-08-31 09:22:23 +0100600
601 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000602
603 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
604 {
605 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000606 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
607 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000608 }
609 else
610 {
611 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
612 }
613
Matthew Sloyan81beae32021-07-13 19:46:11 +0100614 // Make sure isConstant flag is set.
615 tensorInfo.SetConstant();
616
telsoa01c577f2c2018-08-31 09:22:23 +0100617 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
618}
619
telsoa01c577f2c2018-08-31 09:22:23 +0100620armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
621{
622 // generate the binding id by shifting the tensor id by 8 bit
623 // and add the subgraph id, which allows 256 subgraphs
624 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
625}
626
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000627bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
628{
629 const unsigned int actualSize = actual.GetNumDimensions();
630 if (actualSize != expected.size())
631 {
632 return false;
633 }
634
635 for (unsigned int i = 0u; i < actualSize; i++)
636 {
637 if (expected[i] < 0 ||
638 actual[i] != static_cast<unsigned int>(expected[i]))
639 {
640 return false;
641 }
642 }
643
644 return true;
645}
646
Cathal Corbett2b922e22022-09-23 15:49:24 +0100647bool CheckShape(const armnn::TensorShape& actual, const armnn::TensorShape& expected)
648{
649 std::vector<int32_t> expectedVec;
650 for (uint32_t i = 0; i < expected.GetNumDimensions(); i++)
651 {
652 expectedVec.push_back(expected[i]);
653 }
654 return CheckShape(actual, expectedVec);
655}
656
James Conroy05102392020-06-24 15:39:55 +0100657void CheckMatchingQuantization(const TensorInfo& first,
658 const TensorInfo& second,
659 const std::string& descName,
660 std::string const& firstName,
661 std::string const& secondName)
662{
663 if (!first.IsQuantized() ||
664 !second.IsQuantized())
665 {
666 // Not a quantized type, ignore the validation
667 return;
668 }
669
670 DataType firstDataType = first.GetDataType();
671 DataType secondDataType = second.GetDataType();
672
673 if (firstDataType != secondDataType)
674 {
675 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
676 " must be of the same quantized type, " +
677 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
678 secondName + " is " + GetDataTypeName(secondDataType));
679 }
680
681 if (!first.IsTypeSpaceMatch(second))
682 {
683 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
684 " must have the same quantization space, " +
685 firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
686 " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
687 secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
688 " and scale " + std::to_string(second.GetQuantizationScale()));
689 }
690}
691
Mike Kelly377fb212023-01-10 15:55:28 +0000692bool IsDynamic(TfLiteParserImpl::TensorRawPtr tensorPtr)
693{
694 auto shape = tensorPtr->shape;
695
696 if (shape.empty())
697 {
698 return true;
699 }
700 auto shapeSig = tensorPtr->shape_signature;
701
702 if (shapeSig.empty())
703 {
704 return false;
705 }
706
707 for (unsigned int i = 0; i < shapeSig.size() ; ++i)
708 {
709 if (shapeSig[i] == -1)
710 {
711 return true;
712 }
713 }
714 return false;
715}
716
telsoa01c577f2c2018-08-31 09:22:23 +0100717} // <anonymous>
718
Kevin May7d96b162021-02-03 17:38:41 +0000719TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100720: m_Options(options)
721, m_Network(nullptr, nullptr)
Kevin May7d96b162021-02-03 17:38:41 +0000722, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
telsoa01c577f2c2018-08-31 09:22:23 +0100723{
724 // register supported operators
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100725 m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
Kevin May7d96b162021-02-03 17:38:41 +0000726 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100727 m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
728 m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
Kevin May7d96b162021-02-03 17:38:41 +0000729 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
730 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
Samuel Yapfd3ba5a2022-08-24 17:04:34 +0100731 m_ParserFunctions[tflite::BuiltinOperator_BATCH_MATMUL] = &TfLiteParserImpl::ParseBatchMatMul;
mathad01b392e982021-04-07 12:07:30 +0100732 m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
Kevin May7d96b162021-02-03 17:38:41 +0000733 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
734 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
Matthew Sloyan4d217c02021-10-07 11:48:58 +0100735 // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
Cathal Corbett80b4ef02022-05-25 11:21:11 +0100736 #if defined(ARMNN_POST_TFLITE_2_4)
Matthew Sloyaneb5f8102021-10-05 17:31:42 +0100737 m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
Matthew Sloyan4d217c02021-10-07 11:48:58 +0100738 #endif
Kevin May7d96b162021-02-03 17:38:41 +0000739 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
740 m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
741 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
742 m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100743 m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
Kevin May7d96b162021-02-03 17:38:41 +0000744 m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300745 m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000746 m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
Teresa Charlin3ab85482021-06-08 16:59:29 +0100747 m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
Teresa Charlincdbd40b2022-02-25 13:21:55 +0000748 m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV] = &TfLiteParserImpl::ParseFloorDiv;
Kevin May7d96b162021-02-03 17:38:41 +0000749 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
750 m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
Teresa Charlin91a53ea2022-04-25 15:47:29 +0100751 m_ParserFunctions[tflite::BuiltinOperator_GATHER_ND] = &TfLiteParserImpl::ParseGatherNd;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300752 m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
753 m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000754 m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
755 m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300756 m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
757 m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
Mike Kelly31dce2b2021-09-01 21:22:37 +0100758 m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
759 = &TfLiteParserImpl::ParseLocalResponseNormalization;
Teresa Charlin28aa6692022-07-12 11:18:44 +0100760 m_ParserFunctions[tflite::BuiltinOperator_LOG] = &TfLiteParserImpl::ParseLog;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100761 m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
Kevin May7d96b162021-02-03 17:38:41 +0000762 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
Teresa Charlinfd33a692022-06-29 15:35:57 +0100763 m_ParserFunctions[tflite::BuiltinOperator_LOG_SOFTMAX] = &TfLiteParserImpl::ParseLogSoftmax;
Kevin May7d96b162021-02-03 17:38:41 +0000764 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
765 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
766 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
767 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
768 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
Matthew Sloyanaf3a4ef2021-10-22 15:48:12 +0100769 m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
Kevin May7d96b162021-02-03 17:38:41 +0000770 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
771 m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
Bruno Goncalves2d0eb862021-07-11 14:10:15 -0300772 m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
Kevin May7d96b162021-02-03 17:38:41 +0000773 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
774 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
Mike Kelly0d77ae12022-01-07 17:42:27 +0000775 m_ParserFunctions[tflite::BuiltinOperator_PADV2] = &TfLiteParserImpl::ParsePad;
Narumol Prangnawaratbfaee6b2021-05-24 18:50:24 +0100776 m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
Kevin May7d96b162021-02-03 17:38:41 +0000777 m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
778 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
779 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
Sadik Armagana2747482021-02-09 10:28:54 +0000780 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
781 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
Teresa Charlin4e3e8312021-08-05 12:34:37 +0100782 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
Kevin May7d96b162021-02-03 17:38:41 +0000783 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
784 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
785 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100786 m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
Teresa Charlinf0fce5b2022-05-04 17:24:43 +0100787 m_ParserFunctions[tflite::BuiltinOperator_SQRT] = &TfLiteParserImpl::ParseSqrt;
Keith Davis0176fd82021-06-01 17:36:32 +0100788 m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
Teresa Charlin28aa6692022-07-12 11:18:44 +0100789 m_ParserFunctions[tflite::BuiltinOperator_SIN] = &TfLiteParserImpl::ParseSin;
Kevin May7d96b162021-02-03 17:38:41 +0000790 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
791 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
792 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
793 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
794 m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
795 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
796 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
797 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
798 m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
799 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
800 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
801 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
Mike Kelly5880b912022-01-28 16:18:54 +0000802 m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
803 = &TfLiteParserImpl::ParseUnidirectionalSequenceLSTM;
Kevin May7d96b162021-02-03 17:38:41 +0000804 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;
Matthew Sloyan28f177c2021-04-09 14:38:52 +0100805
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100806 // register supported custom operators
Kevin May7d96b162021-02-03 17:38:41 +0000807 m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
telsoa01c577f2c2018-08-31 09:22:23 +0100808}
809
Mike Kelly377fb212023-01-10 15:55:28 +0000810armnn::TensorInfo TfLiteParserImpl::InputTensorInfo(size_t subgraphIndex,
811 size_t operatorIndex,
812 int input)
813{
814 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
815 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
816
817 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[input]);
818 auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);
819
820 if (search != m_TensorInfos.end())
821 {
822 return m_TensorInfos[inputId];
823 }
824 else
825 {
826 auto tensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
827 m_TensorInfos.insert({ inputId, tensorInfo });
828 return tensorInfo;
829 }
830}
831
832armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromInputs(size_t subgraphIndex,
833 size_t operatorIndex,
834 armnn::IConnectableLayer* layer,
835 int output,
836 std::vector<int> inputs)
837{
838 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
839 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
840
841 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
842
843 auto outputSearch = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(outputId);
844
845 if (outputSearch != m_TensorInfos.end())
846 {
847 return m_TensorInfos[outputId];
848 }
849
850 const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
851 TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);
852
853 if (IsDynamic(outputTensorPtr))
854 {
855 if (inputs.empty())
856 {
857 for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
858 {
859 inputs.emplace_back(i);
860 }
861 }
862 auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
863 std::vector<armnn::TensorShape> inputShapes;
864
865 for (unsigned int i = 0; i < inputs.size(); ++i)
866 {
867 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[inputs[i]]);
868 auto search = armnnTfLiteParser::TfLiteParserImpl::m_TensorInfos.find(inputId);
869
870 if (search != m_TensorInfos.end())
871 {
872 auto &inputTensorInfo = m_TensorInfos[inputId];
873 inputShapes.push_back(inputTensorInfo.GetShape());
874 }
875 else
876 {
877 m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
878 auto inputTensorInfo = ::armnnTfLiteParser::ToTensorInfo(subgraphPtr->tensors[inputId].get());
879 m_TensorInfos.insert({ inputId, inputTensorInfo});
880 inputShapes.push_back(inputTensorInfo.GetShape());
881 }
882 }
883 const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
884 tensor.SetShape(outputShape);
885 }
886 m_TensorInfos.insert({ outputId, tensor});
887 return tensor;
888}
889
890armnn::TensorInfo TfLiteParserImpl::OutputTensorInfoFromShapes(size_t subgraphIndex,
891 size_t operatorIndex,
892 armnn::IConnectableLayer* layer,
893 int output,
894 std::vector<armnn::TensorShape> inputShapes)
895{
896 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
897 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
898
899 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[output]);
900 const auto& outputTensorPtr = subgraphPtr->tensors[outputId].get();
901 TensorInfo tensor = ::armnnTfLiteParser::ToTensorInfo(outputTensorPtr, true);
902
903 if (IsDynamic(outputTensorPtr))
904 {
905 const auto outputShape = layer->InferOutputShapes(inputShapes)[output];
906 tensor.SetShape(outputShape);
907 }
908 m_TensorInfos.insert({ outputId, tensor});
909 return tensor;
910}
911
Kevin May7d96b162021-02-03 17:38:41 +0000912void TfLiteParserImpl::ResetParser()
telsoa01c577f2c2018-08-31 09:22:23 +0100913{
914 m_Network = armnn::INetworkPtr(nullptr, nullptr);
915 m_Model = nullptr;
916 m_SubgraphConnections.clear();
Mike Kelly377fb212023-01-10 15:55:28 +0000917 m_OverriddenOutputShapes.clear();
Mike Kelly5880b912022-01-28 16:18:54 +0000918 m_ConstantsToDequantize.clear();
919 m_ConstantsToBeCreated.clear();
Mike Kelly377fb212023-01-10 15:55:28 +0000920 m_TensorInfos.clear();
telsoa01c577f2c2018-08-31 09:22:23 +0100921}
922
/// Builds an ArmNN network from a TfLite flatbuffer file on disk.
/// Resets all parser state first, so the parser can be reused.
///
/// @param graphFile  Path to the .tflite model file.
/// @return The constructed INetwork.
INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}
929
/// Builds an ArmNN network from a TfLite flatbuffer already held in memory.
/// Resets all parser state first, so the parser can be reused.
///
/// @param binaryContent  Raw bytes of the .tflite model.
/// @return The constructed INetwork.
INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
936
/// Builds an ArmNN network from an already-unpacked tflite::ModelT.
/// Takes ownership of the model; resets all parser state first.
///
/// @param model  Unpacked flatbuffer model object.
/// @return The constructed INetwork.
armnn::INetworkPtr TfLiteParserImpl::LoadModel(std::unique_ptr<tflite::ModelT> model)
{
    ResetParser();
    m_Model = std::move(model);

    return CreateNetworkFromModel();
}
945
/// Core driver: walks the loaded tflite model (m_Model) and builds the
/// corresponding ArmNN INetwork. Currently supports exactly one subgraph.
/// Each operator is dispatched to its registered parser function; input,
/// output and constant layers are then set up, and finally all recorded
/// producer/consumer slots are connected.
///
/// @return The constructed network (ownership moved to the caller).
/// @throws ParseException on unsupported subgraph counts, out-of-range
///         operator codes, or any error raised by an operator parser
///         (re-thrown with subgraph/operator context prepended).
INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
{

    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
    // Translate parser options into backend options for network creation.
    if (m_Options)
    {
        if (m_Options.value().m_InferAndValidate)
        {
            BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                      {
                                                          { "InferAndValidate", true }
                                                      });

            networkOptions.push_back(shapeInferenceMethodOption);
        }
        if (m_Options.value().m_AllowExpandedDims)
        {
            BackendOptions shapeInferenceMethodOption("AllowExpandedDims",
                                                      {
                                                          { "AllowExpandedDims", true }
                                                      });

            networkOptions.push_back(shapeInferenceMethodOption);
        }
    }
    m_Network = INetwork::Create(networkOptions);
    ARMNN_ASSERT(m_Model.get() != nullptr);

    // Multiple subgraphs are not supported by this parser.
    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
                        m_Model->subgraphs.size(),
                        CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    size_t operatorIndex = 0;
    try
    {
        for (SubgraphPtr const& subgraph : m_Model->subgraphs)
        {
            // Pre-populate the tensor-info cache for graph inputs and constants
            // before any operator parser runs.
            SetupInputLayerTensorInfos(subgraphIndex);
            SetupConstantLayerTensorInfos(subgraphIndex);

            m_SubgraphConnections.emplace_back(subgraph->tensors.size());
            for (OperatorPtr const& op : subgraph->operators)
            {
                auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];

// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
#if defined(ARMNN_POST_TFLITE_2_3)
                auto builtinCode = std::max(opCodePtr->builtin_code,
                        static_cast<tflite::BuiltinOperator>(opCodePtr->deprecated_builtin_code));
#else
                auto builtinCode = opCodePtr->builtin_code;
#endif

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
                                                     "subgraph:{} operator idx:{}. {}",
                                                     builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
                                                     operatorIndex, CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto& parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
                ++operatorIndex;
            }

            SetupInputLayers(subgraphIndex);
            SetupOutputLayers(subgraphIndex);
            SetupConstantLayers(subgraphIndex);

            ++subgraphIndex;
            operatorIndex = 0;
        }
    }
    catch (const ParseException& e)
    {
        // Re-throw with subgraph/operator context so the caller can locate the
        // failing operator; also log the full message.
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }
    return std::move(m_Network);
}
1057
Mike Kelly0506ef02023-01-03 16:29:44 +00001058bool TfLiteParserImpl::ShouldConstantTensorBeConverted(TfLiteParserImpl::TensorRawPtr tensorPtr,
1059 armnn::DataType inputDataType,
1060 armnn::DataType tensorDataType)
Mike Kelly5880b912022-01-28 16:18:54 +00001061{
Mike Kelly0506ef02023-01-03 16:29:44 +00001062 return (TfLiteParserImpl::IsConstTensor(tensorPtr) && inputDataType == DataType::Float32 &&
1063 (tensorDataType == DataType::QAsymmU8 ||
1064 tensorDataType == DataType::QAsymmS8 ||
1065 tensorDataType == DataType::QSymmS8 ||
1066 tensorDataType == DataType::Signed32 ||
1067 tensorDataType == DataType::Signed64));
Mike Kelly5880b912022-01-28 16:18:54 +00001068}
1069
Kevin May7d96b162021-02-03 17:38:41 +00001070void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
1071 size_t tensorIndex,
1072 armnn::IOutputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +01001073{
1074 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001075 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
1076 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001077
1078 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
1079
Nikhil Rajd4d1c312022-08-03 18:20:59 +01001080 if (slot->GetOwningIConnectableLayer().GetType() != LayerType::Constant)
telsoa01c577f2c2018-08-31 09:22:23 +01001081 {
telsoa01c577f2c2018-08-31 09:22:23 +01001082
Nikhil Rajd4d1c312022-08-03 18:20:59 +01001083 // assuming there is only one producer for that tensor
1084 if (tensorSlots.outputSlot != nullptr)
1085 {
1086 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
1087 "subgraph:{} tensor:{} {}",
1088 subgraphIndex,
1089 tensorIndex,
1090 CHECK_LOCATION().AsString()));
1091 }
1092 }
telsoa01c577f2c2018-08-31 09:22:23 +01001093 tensorSlots.outputSlot = slot;
1094}
1095
/// Records `slot` as one consumer of the given tensor. A tensor may have any
/// number of consumers; the actual connections are made later in
/// CreateNetworkFromModel once all layers exist.
void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
                                                size_t tensorIndex,
                                                armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}
1107
Kevin May7d96b162021-02-03 17:38:41 +00001108void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001109{
1110 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1111
1112 // NOTE: By default we presume the custom operator is not supported
Kevin May7d96b162021-02-03 17:38:41 +00001113 auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001114
1115 // Identify custom code defined for custom operator
1116 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1117 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
1118
Mike Kelly377fb212023-01-10 15:55:28 +00001119 // Find parser function that corresponds to custom code (if any)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001120 auto iterator = m_CustomParserFunctions.find(customCode);
1121 if (iterator != m_CustomParserFunctions.end())
1122 {
1123 customParserFunction = iterator->second;
1124 }
1125
1126 // Run parser function
1127 (this->*customParserFunction)(subgraphIndex, operatorIndex);
1128}
1129
Kevin May7d96b162021-02-03 17:38:41 +00001130void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001131{
1132 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001133
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001134 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1135
1136 auto opcodeIndex = operatorPtr->opcode_index;
Jim Flynnfca233e2021-09-23 12:16:53 +01001137
1138// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
Matthew Sloyan4d217c02021-10-07 11:48:58 +01001139#if defined(ARMNN_POST_TFLITE_2_3)
Jim Flynnfca233e2021-09-23 12:16:53 +01001140 auto opcode = std::max(m_Model->operator_codes[opcodeIndex]->builtin_code,
1141 static_cast<tflite::BuiltinOperator>(m_Model->operator_codes[opcodeIndex]->deprecated_builtin_code));
1142#else
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001143 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
Jim Flynnfca233e2021-09-23 12:16:53 +01001144#endif
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001145
1146 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
1147 {
1148 // Do not add StandInLayer, throw ParseException instead
1149 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001150 fmt::format("Operator not supported. "
1151 "subgraph:{} operator:{} "
1152 "opcode_index:{} opcode:{} / {} {}",
1153 subgraphIndex,
1154 operatorIndex,
1155 opcodeIndex,
1156 opcode,
1157 tflite::EnumNameBuiltinOperator(opcode),
1158 CHECK_LOCATION().AsString()));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001159 }
1160
1161 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1162 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1163
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001164 const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
1165 const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001166
1167 StandInDescriptor descriptor(numInputs, numOutputs);
James Ward58dec6b2020-09-11 17:32:44 +01001168 auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001169
1170 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
1171 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001172 ARMNN_ASSERT(layer != nullptr);
1173
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001174 for (unsigned int i = 0u; i < numOutputs; ++i)
1175 {
Mike Kelly04d82292023-01-19 18:29:40 +00001176 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[0], true));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01001177 }
1178
1179 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1180 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1181
1182 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
1183 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +01001184}
1185
mathad01b392e982021-04-07 12:07:30 +01001186void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
1187{
1188 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1189
1190 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1191 CHECK_VALID_SIZE(inputs.size(), 1);
1192 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1193 CHECK_VALID_SIZE(outputs.size(), 1);
1194
1195 auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
1196
1197 IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
1198 ARMNN_ASSERT(layer != nullptr);
1199
Mike Kelly377fb212023-01-10 15:55:28 +00001200 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
mathad01b392e982021-04-07 12:07:30 +01001201 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1202
1203 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1204 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1205
1206 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1207 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1208}
1209
Kevin May7d96b162021-02-03 17:38:41 +00001210void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001211{
1212 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1213
Mike Kelly0d77ae12022-01-07 17:42:27 +00001214 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1215 const auto* options = operatorPtr->builtin_options.AsConv2DOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01001216
1217 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1218
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001219 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1220 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1221 CHECK_VALID_SIZE(outputs.size(), 1);
1222
telsoa01c577f2c2018-08-31 09:22:23 +01001223 Convolution2dDescriptor desc;
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001224 inputs.size() == 3 ?
1225 desc.m_BiasEnabled = true : desc.m_BiasEnabled = false;
telsoa01c577f2c2018-08-31 09:22:23 +01001226 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1227 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +00001228 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +01001229 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1230 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +00001231
Mike Kelly377fb212023-01-10 15:55:28 +00001232 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1233 armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001234
1235 // assuming input is NHWC
1236 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001237 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001238
1239 // assuming the filter is OHWI : Output, H, W, Input
1240 // which is essentially the same as NHWC
1241 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001242 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001243
Pablo Tellof0bd6832019-04-26 17:58:13 +01001244 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1245 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1246 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1247 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +01001248
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001249 // Add the first input and weights tensor to the registration list.
1250 // The constant weights will be added by SetupConstantLayers.
1251 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1252 std::vector<unsigned int> tensorIndexesToRegister = { inputTensorIndexes[0], inputTensorIndexes[1] };
telsoa01c577f2c2018-08-31 09:22:23 +01001253
James Ward58dec6b2020-09-11 17:32:44 +01001254 auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001255 armnn::IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, layerName.c_str());
telsoa01c577f2c2018-08-31 09:22:23 +01001256
Mike Kelly0506ef02023-01-03 16:29:44 +00001257 if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
telsoa01c577f2c2018-08-31 09:22:23 +01001258 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001259 m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
telsoa01c577f2c2018-08-31 09:22:23 +01001260 }
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001261
1262 if (desc.m_BiasEnabled)
telsoa01c577f2c2018-08-31 09:22:23 +01001263 {
Mike Kelly377fb212023-01-10 15:55:28 +00001264 armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001265
1266 // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
1267 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
1268
Mike Kelly0506ef02023-01-03 16:29:44 +00001269 if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001270 {
1271 m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
1272 }
telsoa01c577f2c2018-08-31 09:22:23 +01001273 }
1274
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001275 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001276
Mike Kelly377fb212023-01-10 15:55:28 +00001277 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
jimfly01c25411c2018-11-14 17:47:22 +00001278 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001279
1280 // register the input connection slots for the layer, connections are made after all layers have been created
1281 // only the tensors for the inputs are relevant, exclude the const tensors
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001282 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
telsoa01c577f2c2018-08-31 09:22:23 +01001283
jimfly01c25411c2018-11-14 17:47:22 +00001284 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +01001285 // register the output connection slots for the layer, connections are made after all layers have been created
1286 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001287 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, { outputTensorIndexes[0] });
telsoa01c577f2c2018-08-31 09:22:23 +01001288}
1289
Matthew Sloyan4d217c02021-10-07 11:48:58 +01001290// Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
Cathal Corbett80b4ef02022-05-25 11:21:11 +01001291#if defined(ARMNN_POST_TFLITE_2_4)
/// Parses a TfLite CONV_3D operator and adds an armnn Convolution3d layer
/// (plus an optional fused activation layer) to the network being built.
/// Inputs: 0 = data (NDHWC), 1 = weights (DHWIO), 2 = optional bias.
void TfLiteParserImpl::ParseConv3D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsConv3DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    // Strides and dilations come straight from the TfLite options; the bias is
    // only enabled further down if a third input tensor is present.
    Convolution3dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_DataLayout = armnn::DataLayout::NDHWC;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_StrideZ = CHECKED_NON_NEGATIVE(options->stride_d);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
    desc.m_DilationZ = CHECKED_NON_NEGATIVE(options->dilation_d_factor);

    // Two mandatory inputs (data, weights); a third, optional input is the bias.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Assuming input is NDHWC
    unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
    unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Convert the TfLite padding scheme (SAME/VALID) to explicit per-axis padding.
    CalcPadding(inputDepth, filterDepth, desc.m_StrideZ,
                desc.m_DilationZ, desc.m_PadFront, desc.m_PadBack, options->padding);
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // NOTE(review): filterTensorAndData is not referenced below — the weights are
    // registered through tensorIndexesToRegister and materialised by
    // SetupConstantLayers. Confirm whether this call is needed for a side effect
    // before removing it.
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());

    auto layerName = fmt::format("Conv3D:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input and weights tensor to the registration list.
    // The constant weights will be added by SetupConstantLayers.
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;

        // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
    }

    armnn::IConnectableLayer* layer = m_Network->AddConvolution3dLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Output shape is inferred from the data (0) and weights (1) inputs.
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register the input connection slots for the layer, connections are made after all layers have been created
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
Matthew Sloyan4d217c02021-10-07 11:48:58 +01001368#endif
Matthew Sloyaneb5f8102021-10-05 17:31:42 +01001369
Kevin May7d96b162021-02-03 17:38:41 +00001370void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001371{
1372 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1373
Mike Kelly0d77ae12022-01-07 17:42:27 +00001374 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1375 const auto* options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01001376
1377 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1378
1379 DepthwiseConvolution2dDescriptor desc;
telsoa01c577f2c2018-08-31 09:22:23 +01001380 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1381 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +00001382 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matthew Jacksond6a9dee2019-07-22 13:53:24 +01001383 CHECKED_NON_NEGATIVE(options->depth_multiplier);
telsoa01c577f2c2018-08-31 09:22:23 +01001384
1385 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1386 CHECK_VALID_SIZE(inputs.size(), 2, 3);
Cathal Corbett06902652022-04-14 17:55:11 +01001387 if (inputs.size() == 3)
1388 {
1389 desc.m_BiasEnabled = true;
1390 }
1391
telsoa01c577f2c2018-08-31 09:22:23 +01001392 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1393 CHECK_VALID_SIZE(outputs.size(), 1);
Pablo Tellof0bd6832019-04-26 17:58:13 +01001394 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
1395 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +00001396
Mike Kelly377fb212023-01-10 15:55:28 +00001397 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1398 armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001399
Matteo Martincigh747ef822018-12-18 09:26:39 +00001400 // Assuming input is NHWC
telsoa01c577f2c2018-08-31 09:22:23 +01001401 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1402 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
Matteo Martincigh747ef822018-12-18 09:26:39 +00001403
1404 // TensorflowLite weights come in the format [1, H, W, I * M]
telsoa01c577f2c2018-08-31 09:22:23 +01001405 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1406 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1407
Pablo Tellof0bd6832019-04-26 17:58:13 +01001408 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
1409 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
1410 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
1411 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +01001412
Jan Eilers53ef7952021-06-02 12:01:25 +01001413 // ArmNN uses the same filter tensor layout at TfLite [1, H, W, O] no need for any permutation
James Ward58dec6b2020-09-11 17:32:44 +01001414 auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001415
Cathal Corbett06902652022-04-14 17:55:11 +01001416 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1417 // Add the first input and weights tensor to the registration list.
1418 // The constant weights will be added by SetupConstantLayers.
1419 std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};
1420
1421 armnn::IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, layerName.c_str());
1422
1423 if (desc.m_BiasEnabled)
telsoa01c577f2c2018-08-31 09:22:23 +01001424 {
1425 desc.m_BiasEnabled = true;
Mike Kelly377fb212023-01-10 15:55:28 +00001426 TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Cathal Corbett06902652022-04-14 17:55:11 +01001427
1428 // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
1429 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
telsoa01c577f2c2018-08-31 09:22:23 +01001430 }
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001431 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001432
Mike Kelly377fb212023-01-10 15:55:28 +00001433 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
jimfly01c25411c2018-11-14 17:47:22 +00001434 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001435
1436 // register the input connection slots for the layer, connections are made after all layers have been created
1437 // only the tensors for the inputs are relevant, exclude the const tensors
Cathal Corbett06902652022-04-14 17:55:11 +01001438 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
telsoa01c577f2c2018-08-31 09:22:23 +01001439
jimfly01c25411c2018-11-14 17:47:22 +00001440 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +01001441 // register the output connection slots for the layer, connections are made after all layers have been created
1442 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1443 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1444}
1445
Kevin May7d96b162021-02-03 17:38:41 +00001446void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsed66d142019-12-06 09:55:55 +00001447{
1448 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1449
1450 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1451 CHECK_VALID_SIZE(inputs.size(), 1);
1452
1453 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1454 CHECK_VALID_SIZE(outputs.size(), 1);
1455
James Ward58dec6b2020-09-11 17:32:44 +01001456 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +00001457
1458 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001459 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +00001460
Mike Kelly377fb212023-01-10 15:55:28 +00001461 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Finn Williamsed66d142019-12-06 09:55:55 +00001462 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1463
1464 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1465 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1466
1467 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1468 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1469}
1470
Teresa Charlin3ab85482021-06-08 16:59:29 +01001471void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorIndex)
1472{
1473 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1474
1475 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1476 CHECK_VALID_SIZE(inputs.size(), 2);
1477
1478 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1479 CHECK_VALID_SIZE(outputs.size(), 1);
1480
1481 auto layerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
1482
Mike Kelly377fb212023-01-10 15:55:28 +00001483 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001484 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1485
1486 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1487
1488 ReshapeDescriptor reshapeDesc;
Finn Williamsb49ed182021-06-29 15:50:08 +01001489
1490 if (outputTensorInfo.GetShape().AreAllDimensionsSpecified())
1491 {
1492 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1493 }
1494 else
1495 {
1496 int32_t axis = inputs[1]->shape[0];
1497
1498 int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
1499
1500 if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
1501 {
1502 throw ParseException("axis must be in range [0 - (inputDimSize + 1), inputDimSize] inclusive");
1503 }
1504
1505 if(axis < 0)
1506 {
1507 axis = inputDimSize + axis + 1;
1508 }
1509
Rob Hughesd812a312021-08-06 13:10:53 +01001510 std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
Finn Williamsb49ed182021-06-29 15:50:08 +01001511 unsigned int inputShapeIndex = 0;
1512 for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
1513 {
1514 if (i == static_cast<unsigned int>(axis))
1515 {
1516 shape[i] = 1;
1517 }
1518 else
1519 {
1520 shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
1521 ++inputShapeIndex;
1522 }
1523 }
1524
Rob Hughesd812a312021-08-06 13:10:53 +01001525 reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data());
Finn Williamsb49ed182021-06-29 15:50:08 +01001526 }
Teresa Charlin3ab85482021-06-08 16:59:29 +01001527
1528 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1529 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001530
1531 reshapeDesc.m_TargetShape = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}).GetShape();
1532 outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
1533
Teresa Charlin3ab85482021-06-08 16:59:29 +01001534 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1535
1536 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1537 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1538
1539 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1540 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1541}
1542
Kevin May7d96b162021-02-03 17:38:41 +00001543void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
Keith Davis4cd29a02019-09-09 14:49:20 +01001544{
1545 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1546
1547 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001548 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001549
1550 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1551 CHECK_VALID_SIZE(outputs.size(), 1);
1552
James Ward58dec6b2020-09-11 17:32:44 +01001553 auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly08759e22020-03-02 11:41:31 +00001554 TransposeDescriptor desc;
Keith Davis4cd29a02019-09-09 14:49:20 +01001555
josh minorba424d22019-11-13 10:55:17 -06001556 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001557 {
Mike Kelly377fb212023-01-10 15:55:28 +00001558 armnn::TensorInfo permuteTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Kevin May85d92602019-09-27 17:21:06 +01001559 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001560 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1561 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001562 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
Mike Kelly08759e22020-03-02 11:41:31 +00001563 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
Kevin May85d92602019-09-27 17:21:06 +01001564
Mike Kelly08759e22020-03-02 11:41:31 +00001565 desc = TransposeDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001566 }
Mike Kelly377fb212023-01-10 15:55:28 +00001567 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Keith Davis4cd29a02019-09-09 14:49:20 +01001568
James Conroy05102392020-06-24 15:39:55 +01001569 IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001570 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001571
1572 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1573 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Keith Davis4cd29a02019-09-09 14:49:20 +01001574 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1575
1576 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1577 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1578
1579 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1580 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1581}
1582
/// Parses a TfLite TRANSPOSE_CONV operator and adds an armnn
/// TransposeConvolution2d layer to the network.
/// Inputs: 0 = output shape (constant), 1 = weights, 2 = data, 3 = optional bias.
void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // A fourth input, when present, is the bias tensor.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    if (inputs.size() == 4)
    {
        desc.m_BiasEnabled = true;
    }
    else
    {
        CHECK_VALID_SIZE(inputs.size(), 3);
    }

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Input 0 carries the requested output shape as constant data.
    if (inputs[0])
    {
        armnn::TensorInfo tensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
        std::vector<int> output_shape(tensorInfo.GetNumElements());

        if (tensorInfo.GetDataType() == DataType::Signed32)
        {
            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
        }
        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
        {
            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
            {
                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
            }
        }
        // NOTE(review): for any data type other than Signed32/QAsymmU8,
        // output_shape stays value-initialized (all zeros) yet is still stored
        // in the descriptor below — confirm this is intended.
        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : output_shape)
        {
            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        desc.m_OutputShapeEnabled = true;
    }
    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Convert the TfLite padding scheme to explicit padding (dilation fixed to 1).
    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    // Weights (and bias, if any) are attached to the layer as const tensors,
    // converted to the input's data type where necessary.
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);

    if (desc.m_BiasEnabled)
    {
        auto biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
        auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo, inputTensorInfo.GetDataType());
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasConstTensor.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    // Output shape is inferred from the data (2) and weights (1) inputs.
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0 , { 2, 1 });
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1692
/// Parses a TfLite AVERAGE_POOL_2D operator via the shared pooling path,
/// selecting the Average pooling algorithm.
void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1697
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001698void TfLiteParserImpl::ParseBatchMatMul(size_t subgraphIndex, size_t operatorIndex)
1699{
1700 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1701
1702 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1703 CHECK_VALID_SIZE(inputs.size(), 2);
1704
1705 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1706 CHECK_VALID_SIZE(outputs.size(), 1);
1707
1708 auto layerName = fmt::format("BatchMatMul:{}:{}", subgraphIndex, operatorIndex);
1709
Mike Kelly377fb212023-01-10 15:55:28 +00001710 TensorInfo inputXTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1711 TensorInfo inputYTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001712
1713 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1714 const auto* options = operatorPtr->builtin_options.AsBatchMatMulOptions();
1715
Teresa Charlinbc37a6b2022-09-22 10:12:58 +01001716 // Adjoint in tensorflow lite performs transpose operation
1717 BatchMatMulDescriptor descriptor(options->adj_x,
1718 options->adj_y,
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001719 false,
Teresa Charlinbc37a6b2022-09-22 10:12:58 +01001720 false);
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001721 // Arbitrary DataLayout
1722
1723 IConnectableLayer* layer = m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
1724 ARMNN_ASSERT(layer != nullptr);
1725
Mike Kelly377fb212023-01-10 15:55:28 +00001726 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Samuel Yapfd3ba5a2022-08-24 17:04:34 +01001727 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1728
1729 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1730 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1731
1732 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1733 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1734}
1735
Kevin May7d96b162021-02-03 17:38:41 +00001736void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001737{
1738 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1739
1740 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1741 CHECK_VALID_SIZE(inputs.size(), 3);
1742
1743 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1744 CHECK_VALID_SIZE(outputs.size(), 1);
1745
Mike Kelly377fb212023-01-10 15:55:28 +00001746 armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001747 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1748
Mike Kelly377fb212023-01-10 15:55:28 +00001749 armnn::TensorInfo cropsTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001750 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1751
1752 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1753 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1754
1755 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1756 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1757
1758 size_t step = 2;
1759 std::vector<std::pair<unsigned int, unsigned int>> crops;
1760 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1761 {
1762 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1763 }
1764
1765 armnn::BatchToSpaceNdDescriptor desc;
1766 desc.m_BlockShape = blockShape;
1767 desc.m_Crops = crops;
1768 desc.m_DataLayout = armnn::DataLayout::NHWC;
1769
James Ward58dec6b2020-09-11 17:32:44 +01001770 auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001771
Mike Kelly377fb212023-01-10 15:55:28 +00001772 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
James Conroy05102392020-06-24 15:39:55 +01001773
1774 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1775 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001776
1777 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
1778 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001779 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1780
1781 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1782 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1783
1784 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1785 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1786}
1787
Kevin May7d96b162021-02-03 17:38:41 +00001788void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001789{
1790 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1791
1792 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1793 CHECK_VALID_SIZE(inputs.size(), 1);
1794
1795 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1796 CHECK_VALID_SIZE(outputs.size(), 1);
1797
1798 L2NormalizationDescriptor desc;
1799 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001800 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001801 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1802
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001803 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001804
Mike Kelly377fb212023-01-10 15:55:28 +00001805 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Jackson28c94572019-07-18 10:47:03 +01001806 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1807
1808 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1809 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1810
1811 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1812 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1813}
1814
/// Parses a TfLite MAX_POOL_2D operator via the shared pooling path,
/// selecting the Max pooling algorithm.
void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1819
Kevin May7d96b162021-02-03 17:38:41 +00001820void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001821{
1822 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1823
1824 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1825 CHECK_VALID_SIZE(inputs.size(), 2);
1826
1827 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1828 CHECK_VALID_SIZE(outputs.size(), 1);
1829
James Ward58dec6b2020-09-11 17:32:44 +01001830 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001831
Mike Kelly377fb212023-01-10 15:55:28 +00001832 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1833 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
James Conroy05102392020-06-24 15:39:55 +01001834 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001835
James Conroy05102392020-06-24 15:39:55 +01001836 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1837 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001838
1839 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
1840 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001841 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1842
1843 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001844 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001845
1846 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1847 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1848}
1849
Kevin May7d96b162021-02-03 17:38:41 +00001850void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001851{
1852 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1853
1854 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1855 CHECK_VALID_SIZE(inputs.size(), 2);
1856
1857 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1858 CHECK_VALID_SIZE(outputs.size(), 1);
1859
James Ward58dec6b2020-09-11 17:32:44 +01001860 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001861
Mike Kelly377fb212023-01-10 15:55:28 +00001862 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
1863 TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
James Conroy05102392020-06-24 15:39:55 +01001864 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001865
James Conroy05102392020-06-24 15:39:55 +01001866 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1867 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00001868
1869 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
1870 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001871 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1872
1873 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001874 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001875
1876 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1877 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1878}
1879
// Shared handler for TfLite AVERAGE_POOL_2D and MAX_POOL_2D operators.
// Builds a Pooling2d layer (NHWC), computes SAME/VALID padding from the
// operator options, and appends any fused activation as a separate layer.
void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
                                 size_t operatorIndex,
                                 PoolingAlgorithm algorithm)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsPool2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    // Name the layer after the concrete pooling flavour.
    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
            break;
        default:
            // NOTE(review): in builds where ARMNN_ASSERT_MSG is compiled out this
            // falls through with an empty layerName — callers only pass Average/Max.
            ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // Dilation is 1 for pooling; padding derived from the TfLite padding scheme.
    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // A fused activation becomes its own layer; outputs are registered against it.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1951
Kevin May7d96b162021-02-03 17:38:41 +00001952void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
josh minorba424d22019-11-13 10:55:17 -06001953{
1954 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1955
1956 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1957 CHECK_VALID_SIZE(inputs.size(), 3);
1958 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1959 CHECK_VALID_SIZE(outputs.size(), 1);
1960
1961 SliceDescriptor desc;
1962
1963 // set begin tensor info for slice descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00001964 armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
josh minorba424d22019-11-13 10:55:17 -06001965 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1966
1967 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1968 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1969
1970 // set size tensor info for slice descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00001971 armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
josh minorba424d22019-11-13 10:55:17 -06001972 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1973
Cathal Corbettde33dda2022-09-20 16:40:09 +01001974 std::vector<int> signedSize(sizeTensorInfo.GetNumElements(), 1);
1975
1976 // if size buffer data is not specified, all contents of size vector remain as values of 1
1977 if (sizeBufferPtr->data.data())
1978 {
1979 ::memcpy(signedSize.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1980 }
1981
josh minorba424d22019-11-13 10:55:17 -06001982 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
Mike Kelly377fb212023-01-10 15:55:28 +00001983 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly7ba84d62021-09-10 15:27:19 +01001984
1985 for (unsigned int i = 0; i < signedSize.size(); ++i)
1986 {
1987 int signedValue = signedSize[i];
Jim Flynnfca233e2021-09-23 12:16:53 +01001988
Mike Kelly7ba84d62021-09-10 15:27:19 +01001989 if (signedValue < -1 || signedValue > static_cast<int>(inputTensorInfo.GetShape()[i] - begin[i]))
1990 {
1991 throw ParseException(fmt::format("Invalid value for size {} size must be in range "
1992 "[-1, inputDimSize - begin] [-1, {}] inclusive {}",
1993 signedValue,
1994 inputTensorInfo.GetShape()[i] - begin[i],
1995 CHECK_LOCATION().AsString()));
1996 }
1997
1998 if (signedValue == -1)
1999 {
2000 size[i] = inputTensorInfo.GetShape()[i] - begin[i];
2001 }
2002 else
2003 {
2004 size[i] = static_cast<unsigned int>(signedValue);
2005 }
2006 }
2007
josh minorba424d22019-11-13 10:55:17 -06002008 desc = SliceDescriptor(begin, size);
2009
James Ward58dec6b2020-09-11 17:32:44 +01002010 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06002011
James Conroy05102392020-06-24 15:39:55 +01002012 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
Mike Kelly377fb212023-01-10 15:55:28 +00002013
2014 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
2015 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
josh minorba424d22019-11-13 10:55:17 -06002016 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2017
2018 // register the input connection slots for the layer, connections are made after all layers have been created
2019 // only the tensors for the inputs are relevant, exclude the const tensors
2020 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2021 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2022
2023 // register the output connection slots for the layer, connections are made after all layers have been created
2024 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2025 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2026}
2027
Kevin May7d96b162021-02-03 17:38:41 +00002028void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01002029{
2030 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00002031 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2032 const auto* options = operatorPtr->builtin_options.AsSoftmaxOptions();
telsoa01c577f2c2018-08-31 09:22:23 +01002033
2034 SoftmaxDescriptor desc;
2035 desc.m_Beta = options->beta;
2036
2037 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2038 CHECK_VALID_SIZE(inputs.size(), 1);
2039 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2040 CHECK_VALID_SIZE(outputs.size(), 1);
2041
James Ward58dec6b2020-09-11 17:32:44 +01002042 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01002043 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
2044
Mike Kelly377fb212023-01-10 15:55:28 +00002045 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
telsoa01c577f2c2018-08-31 09:22:23 +01002046 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2047
2048 // register the input connection slots for the layer, connections are made after all layers have been created
2049 // only the tensors for the inputs are relevant, exclude the const tensors
2050 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2051 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2052
2053 // register the output connection slots for the layer, connections are made after all layers have been created
2054 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2055 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2056}
2057
Teresa Charlinfd33a692022-06-29 15:35:57 +01002058void TfLiteParserImpl::ParseLogSoftmax(size_t subgraphIndex, size_t operatorIndex)
2059{
2060 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2061
2062 LogSoftmaxDescriptor desc;
2063
2064 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2065 CHECK_VALID_SIZE(inputs.size(), 1);
2066 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2067 CHECK_VALID_SIZE(outputs.size(), 1);
2068
2069 auto layerName = fmt::format("LogSoftmax:{}:{}", subgraphIndex, operatorIndex);
2070 IConnectableLayer* const layer = m_Network->AddLogSoftmaxLayer(desc, layerName.c_str());
2071
Mike Kelly377fb212023-01-10 15:55:28 +00002072 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Teresa Charlinfd33a692022-06-29 15:35:57 +01002073 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2074
2075 // register the input connection slots for the layer, connections are made after all layers have been created
2076 // only the tensors for the inputs are relevant, exclude the const tensors
2077 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2078 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2079
2080 // register the output connection slots for the layer, connections are made after all layers have been created
2081 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2082 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2083}
2084
// Parses a TfLite SPACE_TO_BATCH_ND operator.
// Inputs: [0] data, [1] block shape (const), [2] paddings (const, pairs of
// before/after per spatial dimension). Output: one tensor. Layout is NHWC.
void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Block shape and pad list are constant tensors read straight from the
    // flatbuffer. NOTE(review): their buffer data pointers are not null-checked
    // before the memcpys below — assumes well-formed models; confirm upstream.
    armnn::TensorInfo blockShapeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    // The flat pad list is consumed two-at-a-time as (padBefore, padAfter) pairs.
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);

    TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register connection slots; actual connections are made once every layer exists.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2136
Teresa Charlin3ab85482021-06-08 16:59:29 +01002137armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
Mike Kelly0d77ae12022-01-07 17:42:27 +00002138 const armnn::TensorInfo& inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01002139{
Teresa Charlin3ab85482021-06-08 16:59:29 +01002140 CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
telsoa01c577f2c2018-08-31 09:22:23 +01002141 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2142
2143 if (inputTensorInfo.GetNumDimensions() > 4)
2144 {
2145 std::stringstream ss;
2146 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2147 << " shape:" << inputTensorInfo.GetShape() << " "
2148 << CHECK_LOCATION().AsString();
2149 throw ParseException(ss.str());
2150 }
2151
2152 if (squeezeDims.empty())
2153 {
2154 squeezeDims.assign(dimensionSequence,
2155 dimensionSequence+inputTensorInfo.GetNumDimensions());
2156 }
2157
2158 std::vector<uint32_t> outputDims;
2159 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2160 {
2161 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2162 auto currentDimension = inputTensorInfo.GetShape()[i];
2163 if (skipSqueeze || currentDimension != 1)
2164 {
2165 outputDims.push_back(currentDimension);
2166 }
2167 }
2168
2169 if (outputDims.size() > 4)
2170 {
2171 std::stringstream ss;
2172 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2173 << " shape:" << inputTensorInfo.GetShape() << " "
2174 << CHECK_LOCATION().AsString();
2175 throw ParseException(ss.str());
2176 }
2177
2178 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2179 outputDims.data());
2180
2181 // we need to preserve the tensor type and the quantization data as well
2182 TensorInfo outTensorInfo = inputTensorInfo;
2183 outTensorInfo.SetShape(outShape);
2184
2185 return outTensorInfo;
2186}
2187
Keith Davis0176fd82021-06-01 17:36:32 +01002188void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
2189{
2190 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2191
2192 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2193 CHECK_VALID_SIZE(inputs.size(), 1);
2194 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2195 CHECK_VALID_SIZE(outputs.size(), 1);
2196
2197 auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
2198
2199 IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
2200 ARMNN_ASSERT(layer != nullptr);
2201
Mike Kelly377fb212023-01-10 15:55:28 +00002202 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Keith Davis0176fd82021-06-01 17:36:32 +01002203 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2204
2205 // Check if output tensor type is Signed32 or Signed64
2206 if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
2207 outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
2208 {
2209 throw ParseException(
2210 fmt::format(
2211 "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
2212 CHECK_LOCATION().AsString()));
2213 }
2214
2215 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2216 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2217
2218 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2219 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2220}
2221
// Parses a TfLite SQUEEZE operator. Squeeze is realised as a Reshape layer
// whose target shape drops the requested size-1 dimensions.
void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
    auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    std::vector<uint32_t> squeezeDim;
    // A single negative dim index is interpreted as a negative index in python
    // Meaning the index will be the shape size plus the negative index value
    if (options->squeeze_dims.size() == 1 && options->squeeze_dims[0] < 0)
    {
        int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
        squeezeDim.push_back(static_cast<uint32_t>(dim));
    }
    else
    {
        squeezeDim = AsUnsignedVector(options->squeeze_dims);
    }

    armnn::TensorInfo outputTensorInfo = TfLiteParserImpl::OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);

    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    // Record the resolved output info in the parser's tensor-info cache so
    // downstream operators see the squeezed shape.
    auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
    m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2271
// Parses a TfLite STRIDED_SLICE operator.
// Inputs: [0] data, [1] begin (const), [2] end (const), [3] strides (const).
// The bit masks from the operator options control per-dimension behaviour.
void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsStridedSliceOptions();

    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Begin, end and stride are constant tensors copied out of the flatbuffer.
    // NOTE(review): buffer data pointers are not null-checked before the
    // memcpys — assumes well-formed models; confirm upstream.
    armnn::TensorInfo beginTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 3);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2328
Kevin May7d96b162021-02-03 17:38:41 +00002329void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002330{
2331 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2332
Mike Kelly0d77ae12022-01-07 17:42:27 +00002333 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2334 const auto* options = operatorPtr->builtin_options.AsSubOptions();
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002335
2336 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2337 CHECK_VALID_SIZE(inputs.size(), 2);
2338
2339 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2340 CHECK_VALID_SIZE(outputs.size(), 1);
2341
Mike Kelly377fb212023-01-10 15:55:28 +00002342 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2343 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002344
James Ward58dec6b2020-09-11 17:32:44 +01002345 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002346 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002347 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002348
Mike Kelly377fb212023-01-10 15:55:28 +00002349 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002350 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2351
2352 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002353 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02002354
2355 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2356
2357 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2358 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2359}
2360
Kevin May7d96b162021-02-03 17:38:41 +00002361void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302362{
2363 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2364
Mike Kelly0d77ae12022-01-07 17:42:27 +00002365 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2366 const auto* options = operatorPtr->builtin_options.AsDivOptions();
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302367
2368 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2369 CHECK_VALID_SIZE(inputs.size(), 2);
2370
2371 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2372 CHECK_VALID_SIZE(outputs.size(), 1);
2373
Mike Kelly377fb212023-01-10 15:55:28 +00002374 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2375 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302376
James Ward58dec6b2020-09-11 17:32:44 +01002377 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302378 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002379 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302380
Mike Kelly377fb212023-01-10 15:55:28 +00002381 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302382 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2383
2384 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002385 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05302386 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2387
2388 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2389 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2390}
2391
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002392void TfLiteParserImpl::ParseFloorDiv(size_t subgraphIndex, size_t operatorIndex)
2393{
2394 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2395
2396 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2397 CHECK_VALID_SIZE(inputs.size(), 2);
2398
2399 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2400 CHECK_VALID_SIZE(outputs.size(), 1);
2401
Mike Kelly377fb212023-01-10 15:55:28 +00002402 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2403 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002404
2405 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
2406 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
2407 ARMNN_ASSERT(layer != nullptr);
2408
Mike Kelly377fb212023-01-10 15:55:28 +00002409 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Teresa Charlincdbd40b2022-02-25 13:21:55 +00002410 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2411
2412 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2413 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2414 layer = AddFusedFloorLayer(layer, 0);
2415
2416 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2417 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2418}
2419
Kevin May7d96b162021-02-03 17:38:41 +00002420void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002421{
2422 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2423
Mike Kelly0d77ae12022-01-07 17:42:27 +00002424 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2425 const auto* options = operatorPtr->builtin_options.AsAddOptions();
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002426
2427 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2428 CHECK_VALID_SIZE(inputs.size(), 2);
2429
2430 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2431 CHECK_VALID_SIZE(outputs.size(), 1);
2432
Mike Kelly377fb212023-01-10 15:55:28 +00002433 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2434 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002435
James Ward58dec6b2020-09-11 17:32:44 +01002436 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002437 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002438 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002439
Mike Kelly377fb212023-01-10 15:55:28 +00002440 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002441 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2442
2443 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002444 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002445 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2446
2447 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2448 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2449}
2450
Kevin May7d96b162021-02-03 17:38:41 +00002451void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002452{
2453 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2454
Mike Kelly0d77ae12022-01-07 17:42:27 +00002455 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2456 const auto* options = operatorPtr->builtin_options.AsMulOptions();
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002457
2458 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2459 CHECK_VALID_SIZE(inputs.size(), 2);
2460
2461 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2462 CHECK_VALID_SIZE(outputs.size(), 1);
2463
Mike Kelly377fb212023-01-10 15:55:28 +00002464 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
2465 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002466
James Ward58dec6b2020-09-11 17:32:44 +01002467 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002468 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002469 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002470
Mike Kelly377fb212023-01-10 15:55:28 +00002471 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002472 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2473
2474 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002475 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002476 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2477
2478 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2479 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2480}
2481
Kevin May7d96b162021-02-03 17:38:41 +00002482void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002483{
2484 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2485
2486 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2487
2488 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2489 CHECK_VALID_SIZE(outputs.size(), 1);
2490
Mike Kelly377fb212023-01-10 15:55:28 +00002491 armnn::TensorInfo dimTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002492 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2493
2494 armnn::MeanDescriptor desc;
2495 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
2496 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
2497 desc.m_Axis = axis;
2498
Mike Kelly377fb212023-01-10 15:55:28 +00002499 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002500 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002501
2502 desc.m_KeepDims =
Mike Kelly377fb212023-01-10 15:55:28 +00002503 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002504 true : false;
2505
James Ward58dec6b2020-09-11 17:32:44 +01002506 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002507 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002508 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002509
Mike Kelly377fb212023-01-10 15:55:28 +00002510 outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002511 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2512
2513 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2514 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2515
2516 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2517 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2518}
2519
void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
{
    // Handles both PAD and PADV2. PADV2 carries an explicit scalar pad value as
    // a third input; PAD has no pad-value input (quantized tensors are padded
    // with the zero point, which dequantizes to real 0).
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Input 1 is the paddings tensor: a flat [before, after] pair per dimension.
    std::vector<unsigned int> padBuffer = GetUIntBuffer(padTensorInfo, m_Model, inputs[1]->buffer);

    size_t step = 2;
    armnn::PadDescriptor desc;
    auto opcode = GetOpCode(m_Model, subgraphIndex, operatorIndex);

    if (opcode == tflite::BuiltinOperator_PAD)
    {
        CHECK_VALID_SIZE(inputs.size(), 2);

        // Quantized PAD: pad with the zero point so the padding represents real 0.
        if (inputTensorInfo.IsQuantized())
        {
            desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
        }
    }
    else if (opcode == tflite::BuiltinOperator_PADV2)
    {
        CHECK_VALID_SIZE(inputs.size(), 3);

        // Input 2 is the constant pad value; Arm NN's PadDescriptor only
        // supports a single scalar value.
        armnn::TensorInfo padValueTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        if (padValueTensorInfo.GetNumElements() != 1)
        {
            ARMNN_THROW_PARSE_EXCEPTION("Multiple padding values are not supported in PADV2");
        }
        BufferRawPtr padValueBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

        // Get the pad value from the input tensor
        if (padValueBufferPtr->data.size() > 0)
        {
            // m_PadValue is stored as float; quantized pad values are
            // dequantized here using the pad-value tensor's own scale/offset.
            switch (padValueTensorInfo.GetDataType())
            {
                case armnn::DataType::Float32:
                {
                    std::vector<float> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = padValueBuffer[0];
                    break;
                }
                case armnn::DataType::QAsymmU8:
                {
                    std::vector<uint8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = armnn::Dequantize<uint8_t>(padValueBuffer[0],
                                                                 padValueTensorInfo.GetQuantizationScale(),
                                                                 padValueTensorInfo.GetQuantizationOffset());
                    break;
                }
                case armnn::DataType::QAsymmS8:
                case armnn::DataType::QSymmS8:
                {
                    std::vector<int8_t> padValueBuffer(padValueTensorInfo.GetNumElements());
                    ::memcpy(padValueBuffer.data(), padValueBufferPtr->data.data(), padValueBufferPtr->data.size());
                    desc.m_PadValue = armnn::Dequantize<int8_t>(padValueBuffer[0],
                                                                padValueTensorInfo.GetQuantizationScale(),
                                                                padValueTensorInfo.GetQuantizationOffset());
                    break;
                }
                default: ARMNN_THROW_PARSE_EXCEPTION("Unsupported DataType");
            }
        }
        // No constant pad value in the buffer: fall back to the zero point for
        // quantized inputs (same default as plain PAD).
        else if (inputTensorInfo.IsQuantized())
        {
            desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
        }
    }

    // Convert the flat [before0, after0, before1, after1, ...] buffer into pairs.
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    auto layerName = (opcode == tflite::BuiltinOperator_PAD) ? fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex)
                                                             : fmt::format("PadV2:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor (input 0) becomes a layer input; the paddings and
    // pad-value tensors were consumed as constants above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2618
void TfLiteParserImpl::ParseMirrorPad(size_t subgraphIndex, size_t operatorIndex)
{
    // MIRROR_PAD: pads by mirroring the input, either excluding the border
    // value (REFLECT) or including it (SYMMETRIC).
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    // Input 1 is the paddings tensor: a flat [before, after] pair per dimension.
    armnn::TensorInfo padTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
    ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());

    size_t step = 2;
    armnn::PadDescriptor desc;
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    // The mirror mode comes from the operator's built-in options.
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsMirrorPadOptions();

    if (options->mode == tflite::MirrorPadMode_REFLECT)
    {
        desc.m_PaddingMode = PaddingMode::Reflect;
    }
    else if (options->mode == tflite::MirrorPadMode_SYMMETRIC)
    {
        desc.m_PaddingMode = PaddingMode::Symmetric;
    }
    else
    {
        ARMNN_THROW_PARSE_EXCEPTION("PaddingMode must be either REFLECT or SYMMETRIC");
    }

    // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
    // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
    auto inputShape = inputTensorInfo.GetShape();
    auto padList = desc.m_PadList;

    // isReflect is 1 for Reflect mode, 0 for Symmetric, tightening the bound
    // below by one for Reflect. NOTE(review): inputShape[i] - isReflect is
    // unsigned arithmetic; a zero-sized dimension with Reflect would wrap —
    // presumably ruled out upstream, but worth confirming.
    const unsigned int isReflect = static_cast<unsigned int>(desc.m_PaddingMode == PaddingMode::Reflect);
    for(unsigned int i = 0; i < padList.size(); ++i)
    {
        if(padList.at(i).first > (inputShape[i] - isReflect) ||
           padList.at(i).second > (inputShape[i] - isReflect))
        {
            ARMNN_THROW_PARSE_EXCEPTION("Padding values must be less (Reflect) or "
                                        "equal (Symmetric) to the dimension size.");
        }
    }

    auto layerName = fmt::format("MirrorPad:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor (input 0) becomes a layer input; the paddings
    // tensor was consumed as a constant above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2689
void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
{
    // PRELU has two inputs: the data tensor and the alpha (slope) tensor.
    // When alpha is a constant it is materialised as an explicit ConstantLayer.
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo alphaTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);


    if (IsConstTensor(inputs[1]))
    {
        // Constant alpha: only input slot 0 is wired to the graph. Slot 1 is
        // fed by a dedicated ConstantLayer created here.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
        RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);

        // Convert the alpha data to the data type of input 0 so both Prelu
        // inputs agree.
        auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo,
                                                               inputTensorInfo.GetDataType());
        std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
        IConnectableLayer* constLayer =
            m_Network->AddConstantLayer(alphaTensorAndData.first, constLayerName.c_str());
        ARMNN_ASSERT(constLayer != nullptr);

        constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
        constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
        // The constant layer does not correspond to a real TfLite operator, so
        // its output is registered under the virtual operator id.
        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            constLayer,
                            { inputTensorIndexes[1] });
    }
    else
    {
        // Dynamic alpha: both inputs are wired to the graph as normal.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
    }

    // Output info is derived from both inputs; quantization parameters of
    // input 0 and the output must match.
    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2742
Kevin May7d96b162021-02-03 17:38:41 +00002743void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00002744{
2745 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2746
2747 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2748 CHECK_VALID_SIZE(inputs.size(), 1);
2749
2750 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2751 CHECK_VALID_SIZE(outputs.size(), 1);
2752
James Ward58dec6b2020-09-11 17:32:44 +01002753 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002754
2755 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002756 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002757
Mike Kelly377fb212023-01-10 15:55:28 +00002758 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan66dedc72019-12-10 16:32:07 +00002759 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2760
2761 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2762 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2763
2764 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2765 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2766}
Finn Williamsc42c3842019-01-22 14:18:11 +00002767
Kevin May7d96b162021-02-03 17:38:41 +00002768void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01002769{
Finn Williamsc42c3842019-01-22 14:18:11 +00002770 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01002771}
2772
Kevin May7d96b162021-02-03 17:38:41 +00002773void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01002774{
Finn Williamsc42c3842019-01-22 14:18:11 +00002775 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
2776}
Sadik Armagan58f39192018-09-17 14:14:39 +01002777
void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
{
    // LEAKY_RELU is handled by the shared activation path (alpha is read from
    // the operator's options inside ParseActivation).
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
}
2782
Kevin May7d96b162021-02-03 17:38:41 +00002783void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsc42c3842019-01-22 14:18:11 +00002784{
2785 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
2786}
2787
Kevin May7d96b162021-02-03 17:38:41 +00002788void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd99851762019-04-09 09:37:38 +01002789{
2790 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
2791}
2792
void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
{
    // ELU is handled by the shared activation path.
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
}
2797
void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
{
    // HARD_SWISH is handled by the shared activation path.
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
}
Finn Williamsc42c3842019-01-22 14:18:11 +00002802
void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    // Common handler for all standalone activation operators (RELU, RELU6,
    // SIGMOID, TANH, LEAKY_RELU, ELU, HARD_SWISH). Builds a single
    // ActivationLayer configured for the requested function.
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    // operatorPtr is only read in the LeakyReLu case below; silence the
    // unused-variable warning for the other paths.
    IgnoreUnused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Activation:");
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    // Complete the layer name and fill in the function-specific descriptor
    // parameters (m_A / m_B) per activation type.
    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
            // RELU6 clamps the output to the range [m_B, m_A] = [0, 6].
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::TanH:
        {
            layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
            // Unit scale factors for the TanH activation.
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        case ActivationFunction::LeakyReLu:
        {
            layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
            // The negative-slope alpha comes from the operator's options.
            const auto* options = operatorPtr->builtin_options.AsLeakyReluOptions();
            activationDesc.m_A = options->alpha;
            break;
        }
        case ActivationFunction::Elu:
        {
            layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
            activationDesc.m_A = 1.0f;
            break;
        }
        case ActivationFunction::HardSwish:
        {
            layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
                            static_cast<int>(activationType), CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
Mike Kelly0d77ae12022-01-07 17:42:27 +00002885armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo& inputTensorInfo,
2886 const std::vector<int32_t>& targetDimsIn)
Sadikb94967b2018-09-19 15:30:00 +01002887{
2888 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2889 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2890
2891 if (stretchDim != targetDimsIn.end())
2892 {
2893 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2894 {
2895 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002896 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01002897 }
2898
2899 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002900 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01002901 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2902
2903 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2904 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2905 }
2906
2907 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2908
2909 TensorInfo reshapeInfo = inputTensorInfo;
2910 reshapeInfo.SetShape(outputShape);
2911
2912 return reshapeInfo;
2913}
2914
/// Parses a TfLite RESHAPE operator and adds the equivalent ArmNN Reshape layer.
///
/// The target shape may arrive two ways:
///   * via the operator's built-in ReshapeOptions (new_shape), or
///   * via a constant second input tensor of type int32.
/// If the second input exists but its buffer is empty (non-constant), the parser attempts to
/// infer the shape at parse time; in that case only target shapes of the form
/// (batch, -1) or (-1), or one matching the declared output rank, are supported.
///
/// @param subgraphIndex  index of the subgraph containing the operator
/// @param operatorIndex  index of the operator within that subgraph
/// @throws ParseException if no target shape can be determined or validation fails
void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsReshapeOptions();
    auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");

    // Extracting new shape for the output
    // There are two ways it can be passed
    //  * First is to define the target shape in the operator built-in options
    //  * Second is to pass it as a second input tensor
    std::vector<int32_t> targetShape;
    bool targetShapeFound = false;
    // Check if built-in options were given
    if (options != nullptr)
    {
        // make sure the parameter is given
        if (options->new_shape.empty() == false)
        {
            targetShape = options->new_shape;
            targetShapeFound = true;
        }
    }

    // If there is no built-in option given or if the built-in new_shape parameter was empty
    if (!targetShapeFound)
    {
        // Check for a second input tensor
        if (inputs.size() > 1 && inputs[1] != nullptr)
        {
            if (inputs[1]->is_variable)
            {
                ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
            }

            if (inputs[1]->shape.size() != 1)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
            }

            if (inputs[1]->type != tflite::TensorType_INT32)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
            }

            // Extract target shape from input
            auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
            auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
            if (values)
            {
                // Constant shape tensor: copy its int32 values verbatim.
                for (int i = 0; i < inputs[1]->shape[0]; ++i)
                {
                    targetShape.push_back(values[i]);
                }
            }
            else
            {
                // Non-constant shape tensor (null buffer data): attempt to infer the target
                // shape from the declared output/input shapes instead.
                try
                {
                    // We attempt to infer during Runtime.
                    TensorShape reshapeShapes = ToTensorInfo(inputs[1]).GetShape();

                    // If the shape input's length equals the declared output rank,
                    // adopt the declared output shape directly.
                    if (reshapeShapes[0] == actualOutputTensorInfo.GetNumDimensions())
                    {
                        for (unsigned int i = 0; i < actualOutputTensorInfo.GetShape().GetNumDimensions(); ++i)
                        {
                            targetShape.push_back(actualOutputTensorInfo.GetShape()[i]);
                        }
                    }
                    // The parser only supports shape (batch, -1) or (-1) for non-constant shape input.
                    else if (reshapeShapes[0] > 2)
                    {
                        throw ParseException(fmt::format("Invalid input shape '{}' in Reshape layer '{}' {}. "
                                                         "When inferring during runtime, the parser only supports "
                                                         "shape (batch, -1) or (-1) for target shape input.",
                                                         reshapeShapes[0],
                                                         layerName,
                                                         CHECK_LOCATION().AsString()));
                    }
                    else
                    {
                        // Length 1 => flatten to (-1); length 2 => (batch, -1) using the
                        // input's leading dimension as the batch size.
                        const int32_t numInputElements = inputTensorInfo.GetNumElements();
                        const int32_t inputTensorShape = inputTensorInfo.GetShape()[0];
                        if (reshapeShapes[0] == 1)
                        {
                            targetShape = {numInputElements};
                        }
                        else if (reshapeShapes[0] == 2)
                        {
                            targetShape = {inputTensorShape, numInputElements / inputTensorShape};
                        }
                    }
                }
                catch (const std::exception& exc)
                {
                    ARMNN_THROW_PARSE_EXCEPTION("Failed attempt to infer during runtime the target shape input for "
                                                "Reshape operation. Reshape operator target shape input buffer data "
                                                "is null. " << exc.what());
                }
            }
        }
        else
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }
    }

    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    // The output shape can be provided to us in 2 ways:
    // 1. through the normal 'shape' parameter given by outputs[indx]->shape
    // 2. through additional parameter 'shape_signature' given by outputs[indx]->buffer.
    // This parameter can sometimes contain -1 value not visible in the 'shape' parameter.
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        // Attempt to extract output shape from secondary 'shape_signature'
        // parameter and try to CheckShape() with this param.
        std::vector<int32_t> secondaryOutputTargetShape = outputs[0]->shape_signature;

        // if outputs[0]->shape_signature contain a -1 value, we need to compute its actual value
        // from reshape input in order to correctly verify reshape parameters equal output shape
        armnn::TensorInfo secondaryReshapeOutputTensorInfo =
            TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, secondaryOutputTargetShape);

        if (!CheckShape(reshapeOutputTensorShape, secondaryReshapeOutputTensorInfo.GetShape()))
        {
            std::stringstream ss;
            ss << "New shape defined in reshape parameters "
               << reshapeOutputTensorShape
               << " does not equal output shape "
               << actualOutputTensorInfo.GetShape()
               << ": "
               << CHECK_LOCATION().AsString();
            throw ParseException(ss.str());
        }
    }
    auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
    // Record the resolved output info so later operators can look it up by tensor id.
    m_TensorInfos[outputTensorIds[0]] = reshapeOutputTensorInfo;

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    // Register the input/output connection slots; connections are made after all layers exist.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3081
/// Parses a TfLite RESIZE_BILINEAR operator by delegating to ParseResize
/// with the Bilinear resize method.
void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
}
3086
/// Parses a TfLite RESIZE_NEAREST_NEIGHBOR operator by delegating to ParseResize
/// with the NearestNeighbor resize method.
void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
}
3091
Kevin May7d96b162021-02-03 17:38:41 +00003092void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
Sadik Armagana3b31f02019-12-05 09:08:53 +00003093{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003094 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3095
3096 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3097 CHECK_VALID_SIZE(inputs.size(), 2);
3098
3099 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3100 CHECK_VALID_SIZE(outputs.size(), 1);
3101
Mike Kelly377fb212023-01-10 15:55:28 +00003102 armnn::TensorInfo sizeTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003103
3104 // Data for the parsed tensor args (size) must be stored locally.
3105 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
3106
3107 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3108 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
3109
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01003110 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00003111 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003112 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01003113 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
3114 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003115
James Ward58dec6b2020-09-11 17:32:44 +01003116 auto layerName = fmt::format("Resize:");
Sadik Armagana3b31f02019-12-05 09:08:53 +00003117
3118 switch (resizeMethod)
3119 {
3120 case ResizeMethod::Bilinear:
3121 {
James Ward58dec6b2020-09-11 17:32:44 +01003122 layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00003123
3124 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3125 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
3126
David Monahan4a0c9b92020-05-30 09:48:39 +01003127 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00003128 break;
3129 }
3130 case ResizeMethod::NearestNeighbor:
3131 {
James Ward58dec6b2020-09-11 17:32:44 +01003132 layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagana3b31f02019-12-05 09:08:53 +00003133 break;
3134 }
3135 default:
3136 {
3137 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003138 fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
3139 static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
Sadik Armagana3b31f02019-12-05 09:08:53 +00003140 }
3141 }
3142
Mike Kelly377fb212023-01-10 15:55:28 +00003143 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
James Conroy05102392020-06-24 15:39:55 +01003144
3145 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
3146 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00003147 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
3148 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02003149 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3150
3151 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3152 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3153
3154 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3155 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3156}
3157
Kevin May7d96b162021-02-03 17:38:41 +00003158void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan479045b2018-10-01 11:51:37 +01003159{
3160 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3161
Mike Kelly0d77ae12022-01-07 17:42:27 +00003162 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3163 const auto* options = operatorPtr->builtin_options.AsConcatenationOptions();
Sadik Armagan479045b2018-10-01 11:51:37 +01003164
3165 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
3166
3167 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3168 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Mike Kelly377fb212023-01-10 15:55:28 +00003169 auto inputTensorIds = GetInputTensorIds(m_Model, subgraphIndex, operatorIndex);
3170
Sadik Armagan479045b2018-10-01 11:51:37 +01003171 CHECK_VALID_SIZE(outputs.size(), 1);
3172
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003173 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
Mike Kelly377fb212023-01-10 15:55:28 +00003174 uint32_t inputRank = InputTensorInfo(subgraphIndex, operatorIndex, 0).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01003175
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003176 const unsigned int concatDimInput = static_cast<unsigned int>(
Mike Kelly377fb212023-01-10 15:55:28 +00003177 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01003178
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003179 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
3180 concatDescriptor.SetConcatAxis(concatDimInput);
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003181 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01003182
3183 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
3184 {
Mike Kelly377fb212023-01-10 15:55:28 +00003185 TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, viewIndex);
Sadik Armagan479045b2018-10-01 11:51:37 +01003186
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003187 // This set up concatDescriptor view origin
3188 armnnUtils::ProcessConcatInputTensorInfo(
Mike Kelly377fb212023-01-10 15:55:28 +00003189 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01003190 }
3191
James Ward58dec6b2020-09-11 17:32:44 +01003192 auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01003193
Jim Flynn906f9462019-05-10 13:55:21 +01003194 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003195 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00003196 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003197 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01003198
James Conroy05102392020-06-24 15:39:55 +01003199 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003200 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01003201
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00003202 // add fused activation layer
3203 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01003204
Sadik Armagan479045b2018-10-01 11:51:37 +01003205 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3206 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3207}
3208
/// Parses a TfLite FULLY_CONNECTED operator and adds an ArmNN FullyConnected layer.
///
/// Weights (input 1) must be 2D. Weights and an optional bias (input 2) are passed to the
/// layer as input slots; constant layers for them are created later by SetupConstantLayers.
/// If the data input has rank > 2 it is flattened with a preceding Reshape layer, and if the
/// declared output has rank > 2 a trailing Reshape restores that rank after the layer.
///
/// @param subgraphIndex  index of the subgraph containing the operator
/// @param operatorIndex  index of the operator within that subgraph
/// @throws ParseException if the weights are not 2D or a flatten shape cannot be deduced
void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_TransposeWeightMatrix = true;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    // Fully Connected Layer accepts two dimensional weights input
    int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
    if (weightsDimension != 2)
    {
        throw ParseException(
            fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
                        "Node {}",
                        weightsDimension,
                        CHECK_LOCATION().AsString()));
    }

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Add the first input tensor to the registration list
    std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);

    desc.m_ConstantWeights = IsConstTensor(inputs[1]);

    // Add the weights input to the registration list, constant layers will be added by SetupConstantLayers if constant.
    tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);

    // Queue the weights buffer for dequantization if its type does not match the input's.
    if (ShouldConstantTensorBeConverted(inputs[1], inputTensorInfo.GetDataType(), filterTensorInfo.GetDataType()))
    {
        m_ConstantsToDequantize.emplace_back(inputs[1]->buffer);
    }

    // A third input, when present, is the bias tensor.
    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);

        // Add the biases input to the registration list, constant layer will be added by SetupConstantLayers.
        tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);

        if (ShouldConstantTensorBeConverted(inputs[2], inputTensorInfo.GetDataType(), biasTensorInfo.GetDataType()))
        {
            m_ConstantsToDequantize.emplace_back(inputs[2]->buffer);
        }
    }

    // Filters and biases are always passed to fully connected as inputs
    layer = m_Network->AddFullyConnectedLayer(desc, layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    unsigned int startingSlotIndex = 0;
    if (inputTensorInfo.GetNumDimensions() > 2)
    {
        // Add reshape to flatten to 2D [batch_size, input_size],
        // where "input_size" corresponds to the number of inputs to the layer,
        // matching the second dimension of weights,
        // and "batch_size" is calculated by dividing the number of elements by "input_size".
        std::vector<unsigned int> reshapedDimensions(2);
        reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
        reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];

        if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
        {
            throw ParseException(
                fmt::format("Failed to deduce input tensor shape from filter size {} {}",
                            reshapedDimensions[1],
                            CHECK_LOCATION().AsString()));
        }

        armnn::TensorInfo reshapedTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
        reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
        inputTensorInfo = reshapedTensorInfo;

        std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
        armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor,
                                                                            reshapeLayerName.c_str());

        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
        reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

        RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
        // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
        tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
        startingSlotIndex = 1;
    }

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromShapes(subgraphIndex, operatorIndex, layer, 0,
                                                                    { inputTensorInfo.GetShape(),
                                                                      filterTensorInfo.GetShape() });

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // If the declared output has rank > 2, append a Reshape ("ExpandDims") layer to restore it.
    if (outputTensorInfo.GetNumDimensions() > 2)
    {
        // Calculate reshape to flatten to 2D [batch_size, input_size]
        std::vector<unsigned int> reshapedDimensions(2);
        reshapedDimensions[1] = filterTensorInfo.GetShape()[0];
        reshapedDimensions[0] = outputTensorInfo.GetNumElements() / reshapedDimensions[1];
        armnn::TensorInfo reshapedOutputTensorInfo = outputTensorInfo;
        if (outputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
        {
            throw ParseException(
                fmt::format("Failed to deduce output tensor shape from filter size {} {}",
                            reshapedDimensions[1],
                            CHECK_LOCATION().AsString()));
        }
        reshapedOutputTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
        layer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);

        std::string reshapeLayerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
        layer = AddReshapeLayer(layer, 0, reshapeLayerName, outputTensorInfo);
    }

    // we need to add the activation layer and fortunately we don't need to care about the data layout
    armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
                                                                             options->fused_activation_function);

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});

    // Cache the final output info (post any ExpandDims reshape) against the output tensor id.
    m_TensorInfos[outputTensorIndexes[0]] = layer->GetOutputSlot(0).GetTensorInfo();
}
3353
/// Parses the TfLite custom operator TFLite_Detection_PostProcess and adds an ArmNN
/// DetectionPostProcess layer.
///
/// Descriptor values are decoded from the operator's flexbuffer-encoded custom options;
/// input 2 (the anchors) must be constant and is captured into the layer. The four output
/// shapes are not specified by the model, so they are derived from max_detections and
/// max_classes_per_detection and recorded as overridden output shapes.
///
/// @param subgraphIndex  index of the subgraph containing the operator
/// @param operatorIndex  index of the operator within that subgraph
/// @throws InvalidArgumentException if the NMS IoU threshold is outside (0, 1]
void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two keys are optional; the descriptor's defaults are kept when they are absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Input 2 holds the anchors; it is captured into the layer as a constant tensor.
    armnn::TensorInfo anchorTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
    auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);

    auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
                                                                       layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    // Outputs are: boxes, classes, scores, and number of detections (in that slot order).
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverriddenOutputShapes.push_back({ 1, numDetectedBox, 4 });
    m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverriddenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverriddenOutputShapes.push_back({ 1 });

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverriddenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                             outputTensorIndexes[1],
                                                             outputTensorIndexes[2],
                                                             outputTensorIndexes[3]});
}
3430
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003431/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00003432void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003433{
3434 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3435
3436 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3437 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3438 CHECK_VALID_SIZE(outputs.size(), 1);
3439
3440 if (inputs.size() < 1)
3441 {
3442 throw ParseException("Pack must have at least one input.");
3443 }
3444
3445 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3446 const auto* options = operatorPtr->builtin_options.AsPackOptions();
3447
3448 StackDescriptor desc;
3449 desc.m_Axis = static_cast<uint32_t>(options->axis);
3450 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
3451
3452 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
Mike Kelly377fb212023-01-10 15:55:28 +00003453 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003454 desc.m_InputShape = inputTensorInfo.GetShape();
3455
James Ward58dec6b2020-09-11 17:32:44 +01003456 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003457 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
3458
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003459 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003460
Mike Kelly377fb212023-01-10 15:55:28 +00003461 armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01003462 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3463
3464 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3465 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
3466
3467 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3468 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3469}
3470
Mike Kelly5880b912022-01-28 16:18:54 +00003471void TfLiteParserImpl::ParseUnidirectionalSequenceLSTM(size_t subgraphIndex, size_t operatorIndex)
3472{
3473 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3474
3475 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3476 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3477
3478 if (inputs.size() < 2)
3479 {
3480 throw ParseException("UnidirectionalSequenceLSTM must have at least 2 input.");
3481 }
3482
3483 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3484 const auto& subgraphPtr = m_Model->subgraphs[subgraphIndex];
3485 const auto nodeParams = operatorPtr->builtin_options.AsUnidirectionalSequenceLSTMOptions();
3486 CHECK_SUPPORTED_FUSED_ACTIVATION(nodeParams, subgraphIndex, operatorIndex);
Mike Kelly377fb212023-01-10 15:55:28 +00003487 auto inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly5880b912022-01-28 16:18:54 +00003488 auto outputTensorInfo = ToTensorInfo(outputs[0]);
3489
3490 // Set the params structure for the AddUnidirectionalSequenceLstmLayer call
3491 // Please refer to each operand at
3492 // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop
3493 armnn::LstmInputParams params;
3494
3495 if (IsOptionalOperandPresent(operatorPtr->inputs[1]))
3496 {
3497 params.m_InputToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[1]].get(),
3498 inputTensorInfo).first;
3499 }
3500
3501 params.m_InputToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[2]].get(),
3502 inputTensorInfo).first;
3503 params.m_InputToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[3]].get(),
3504 inputTensorInfo).first;
3505 params.m_InputToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[4]].get(),
3506 inputTensorInfo).first;
3507
3508 // Recurrent weight tensors of size {n_cell, n_output}
3509 if (IsOptionalOperandPresent(operatorPtr->inputs[5]))
3510 {
3511 params.m_RecurrentToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[5]].get(),
3512 inputTensorInfo).first;
3513 }
3514
3515 params.m_RecurrentToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[6]].get(),
3516 inputTensorInfo).first;
3517 params.m_RecurrentToCellWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[7]].get(),
3518 inputTensorInfo).first;
3519 params.m_RecurrentToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[8]].get(),
3520 inputTensorInfo).first;
3521
3522 // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
3523 if (IsOptionalOperandPresent(operatorPtr->inputs[9]))
3524 {
3525 params.m_CellToInputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[9]].get(),
3526 inputTensorInfo).first;
3527 }
3528
3529 if (IsOptionalOperandPresent(operatorPtr->inputs[10]))
3530 {
3531 params.m_CellToForgetWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[10]].get(),
3532 inputTensorInfo).first;
3533 }
3534
3535 if (IsOptionalOperandPresent(operatorPtr->inputs[11]))
3536 {
3537 params.m_CellToOutputWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[11]].get(),
3538 inputTensorInfo).first;
3539 }
3540
3541 // Gates bias tensors of size {n_cell}
3542 if (IsOptionalOperandPresent(operatorPtr->inputs[12]))
3543 {
3544 params.m_InputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[12]].get(),
3545 inputTensorInfo).first;
3546 }
3547
3548 params.m_ForgetGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[13]].get(),
3549 inputTensorInfo).first;
3550 params.m_CellBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[14]].get(),
3551 inputTensorInfo).first;
3552 params.m_OutputGateBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[15]].get(),
3553 inputTensorInfo).first;
3554
3555 // Projection weight tensor of size {n_output, n_cell}
3556 if (IsOptionalOperandPresent(operatorPtr->inputs[16]))
3557 {
3558 params.m_ProjectionWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[16]].get(),
3559 inputTensorInfo).first;
3560 }
3561 // Projection bias tensor of size {n_output}
3562 if (IsOptionalOperandPresent(operatorPtr->inputs[17]))
3563 {
3564 params.m_ProjectionBias = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[17]].get(),
3565 inputTensorInfo).first;
3566 }
3567
3568 // These state tensors are defined as variable tensors, and will be modified by this op.
3569 armnn::TensorInfo outputStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[18]].get());
3570 m_ConstantsToBeCreated.push_back(operatorPtr->inputs[18]);
3571 armnn::TensorInfo cellStateInInfo = ToTensorInfo(subgraphPtr->tensors[operatorPtr->inputs[19]].get());
3572 m_ConstantsToBeCreated.push_back(operatorPtr->inputs[19]);
3573
3574 // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
3575 if (inputs.size() >= 21 && IsOptionalOperandPresent(operatorPtr->inputs[20]))
3576 {
3577 params.m_InputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[20]].get(),
3578 inputTensorInfo).first;
3579 }
3580
3581 if (inputs.size() >= 22 && IsOptionalOperandPresent(operatorPtr->inputs[21]))
3582 {
3583 params.m_ForgetLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[21]].get(),
3584 inputTensorInfo).first;
3585 }
3586
3587 if (inputs.size() >= 23 && IsOptionalOperandPresent(operatorPtr->inputs[22]))
3588 {
3589 params.m_CellLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[22]].get(),
3590 inputTensorInfo).first;
3591 }
3592
3593 if (inputs.size() >= 24 && IsOptionalOperandPresent(operatorPtr->inputs[23]))
3594 {
3595 params.m_OutputLayerNormWeights = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->inputs[23]].get(),
3596 inputTensorInfo).first;
3597 }
3598
3599 // set the layer descriptor
3600 armnn::UnidirectionalSequenceLstmDescriptor desc;
3601 desc.m_ActivationFunc = nodeParams->fused_activation_function;
3602 desc.m_ClippingThresCell = nodeParams->cell_clip;
3603 desc.m_ClippingThresProj = nodeParams->proj_clip;
3604 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
3605 || params.m_RecurrentToInputWeights == nullptr
3606 || params.m_InputGateBias == nullptr);
3607 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
3608 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
3609 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
3610 || params.m_ForgetLayerNormWeights != nullptr
3611 || params.m_CellLayerNormWeights != nullptr
3612 || params.m_OutputLayerNormWeights != nullptr);
3613 desc.m_TimeMajor = nodeParams->time_major;
3614
Mike Kellyc0800a32022-06-15 10:57:52 +01003615 if (operatorPtr->intermediates.size() > 3 && desc.m_LayerNormEnabled)
Mike Kelly5880b912022-01-28 16:18:54 +00003616 {
3617 auto inputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[0]].get(),
3618 inputTensorInfo).first;
3619 auto inputIntermediateTensorInfo = inputIntermediate->GetInfo();
3620 desc.m_InputIntermediateScale = inputIntermediateTensorInfo.GetQuantizationScale();
3621
3622 auto forgetIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[1]].get(),
3623 inputTensorInfo).first;
3624 auto forgetIntermediateTensorInfo = forgetIntermediate->GetInfo();
3625 desc.m_ForgetIntermediateScale = forgetIntermediateTensorInfo.GetQuantizationScale();
3626
3627 auto cellIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[2]].get(),
3628 inputTensorInfo).first;
3629 auto cellIntermediateTensorInfo = cellIntermediate->GetInfo();
3630 desc.m_CellIntermediateScale = cellIntermediateTensorInfo.GetQuantizationScale();
3631
3632 auto outputIntermediate = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[3]].get(),
3633 inputTensorInfo).first;
3634 auto outputIntermediateTensorInfo = outputIntermediate->GetInfo();
3635 desc.m_OutputIntermediateScale = outputIntermediateTensorInfo.GetQuantizationScale();
3636 }
3637 else
3638 {
3639 float defaultIntermediate = std::pow(2, -12);
3640 desc.m_InputIntermediateScale = defaultIntermediate;
3641 desc.m_ForgetIntermediateScale = defaultIntermediate;
3642 desc.m_CellIntermediateScale = defaultIntermediate;
3643 desc.m_OutputIntermediateScale = defaultIntermediate;
3644 }
3645
Mike Kellyc0800a32022-06-15 10:57:52 +01003646 if (operatorPtr->intermediates.size() > 4)
3647 {
3648 auto hiddentensor = CreateConstTensorPtr(subgraphPtr->tensors[operatorPtr->intermediates[4]].get(),
3649 inputTensorInfo).first;
Mike Kelly5880b912022-01-28 16:18:54 +00003650
Mike Kellyc0800a32022-06-15 10:57:52 +01003651 desc.m_HiddenStateScale = hiddentensor->GetInfo().GetQuantizationScale();
3652 desc.m_HiddenStateZeroPoint = hiddentensor->GetInfo().GetQuantizationOffset();
3653 }
Mike Kelly5880b912022-01-28 16:18:54 +00003654 unsigned int batchSize = inputTensorInfo.GetShape()[0];
3655 unsigned int outputSize = outputTensorInfo.GetShape()[2];
3656 unsigned int numUnits = cellStateInInfo.GetShape()[1];
3657
3658 armnn::DataType dataType = inputTensorInfo.GetDataType();
3659 float qScale = inputTensorInfo.GetQuantizationScale();
3660 float qOffset = inputTensorInfo.GetQuantizationOffset();
3661
3662 armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
3663 if (!desc.m_CifgEnabled)
3664 {
3665 scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
3666 }
3667 armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits},
3668 cellStateInInfo.GetDataType(),
3669 cellStateInInfo.GetQuantizationScale(),
3670 cellStateInInfo.GetQuantizationOffset());
3671 armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
3672
3673 armnn::LstmInputParamsInfo paramsInfo;
3674 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
3675 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
3676 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
3677 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
3678 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
3679 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
3680 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
3681 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
3682 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
3683
3684 if (!desc.m_CifgEnabled)
3685 {
3686 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
3687 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
3688 if (params.m_CellToInputWeights != nullptr)
3689 {
3690 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
3691 }
3692 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
3693 }
3694
3695 if (desc.m_ProjectionEnabled)
3696 {
3697 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
3698 if (params.m_ProjectionBias != nullptr)
3699 {
3700 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
3701 }
3702 }
3703
3704 if (desc.m_PeepholeEnabled)
3705 {
3706 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
3707 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
3708 }
3709
3710 if (desc.m_LayerNormEnabled)
3711 {
3712 if(!desc.m_CifgEnabled)
3713 {
3714 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
3715 }
3716 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
3717 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
3718 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
3719 }
3720
3721 auto layerName = fmt::format("UnidirectionalSequenceLSTM:{}:{}", subgraphIndex, operatorIndex);
3722 armnn::IConnectableLayer* layer = m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
3723 ARMNN_ASSERT(layer != nullptr);
3724
3725 // register the input connection slots for the layer, connections are made after all layers have been created
3726 // only the tensors for the inputs are relevant, exclude the const tensors
3727 auto inputTensorIndexes = AsUnsignedVector({operatorPtr->inputs[0],
3728 operatorPtr->inputs[18],
3729 operatorPtr->inputs[19]});
3730 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0],
3731 inputTensorIndexes[1],
3732 inputTensorIndexes[2]});
3733
3734 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3735
3736 layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
3737 layer->GetOutputSlot(1).SetTensorInfo(cellStateOutTensorInfo);
3738 layer->GetOutputSlot(2).SetTensorInfo(outputTensorInfo);
3739
3740 unsigned int tensorIndex = outputTensorIndexes[0];
3741 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(2));
3742 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3743}
3744
Kevin May7d96b162021-02-03 17:38:41 +00003745void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01003746{
3747 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3748
Mike Kelly0d77ae12022-01-07 17:42:27 +00003749 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3750 const auto* options = operatorPtr->builtin_options.AsUnpackOptions();
Nina Drozd200e3802019-04-15 09:47:39 +01003751
3752 // This unpackAxis indicates the axis to unpack
3753 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
3754
3755 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3756 CHECK_VALID_SIZE(inputs.size(), 1);
3757
Mike Kelly377fb212023-01-10 15:55:28 +00003758 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003759
3760 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
3761 {
3762 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003763 fmt::format("The unpack axis: {} cannot be greater than or equal to "
3764 "the number of input dimension {} {}",
3765 unpackAxis,
3766 inputTensorInfo.GetNumDimensions(),
3767 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003768 }
3769
Nina Drozd200e3802019-04-15 09:47:39 +01003770 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
3771 // If num is not defined, automatically infer from the length of the dimension axis.
3772 if(unpackNum == 0)
3773 {
3774 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
3775 }
3776
3777 // If unpack number cannot be inferred and is still zero, throw ParseException.
3778 if(unpackNum == 0)
3779 {
3780 throw ParseException("Number to unpack must greater than zero.");
3781 }
3782
3783 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3784 CHECK_VALID_SIZE(outputs.size(), unpackNum);
3785
3786 auto inputDimSize = inputTensorInfo.GetNumDimensions();
3787 std::vector<unsigned int> unpackDimSizes(inputDimSize);
3788
3789 // Add current input shape to unpackDimSizes
3790 for (unsigned int i = 0; i < inputDimSize; ++i)
3791 {
3792 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
3793 }
3794
3795 if (unpackDimSizes[unpackAxis] != unpackNum)
3796 {
3797 throw ParseException("Number to unpack must be the same as length of the dimension to "
3798 "unpack along.");
3799 }
3800
3801 unpackDimSizes[unpackAxis] /= unpackNum;
3802
3803 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
3804 for (unsigned int j = 0; j < unpackNum; ++j)
3805 {
3806 // Set the size of the views.
3807 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
3808 {
3809 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
3810 }
3811 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
3812 }
3813
James Ward58dec6b2020-09-11 17:32:44 +01003814 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01003815 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003816 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01003817
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003818 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
3819 unpackDimSizes.data());
3820
Nina Drozd200e3802019-04-15 09:47:39 +01003821 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3822 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3823
Finn Williamsb49ed182021-06-29 15:50:08 +01003824 std::vector<unsigned int> reshapeDims;
3825 for (unsigned int axis = 0; axis < splitOutShape.GetNumDimensions(); ++axis)
3826 {
3827 if (axis != unpackAxis)
3828 {
3829 reshapeDims.push_back(splitOutShape[axis]);
3830 }
3831 }
3832
3833 TensorShape reshapeOutputShape(splitOutShape.GetNumDimensions() -1, reshapeDims.data());
3834
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003835 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
3836 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3837 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003838 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01003839 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003840 armnn::ReshapeDescriptor desc;
Finn Williamsb49ed182021-06-29 15:50:08 +01003841 desc.m_TargetShape = reshapeOutputShape;
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003842 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
3843
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01003844 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
3845 outputTensorInfo.GetDataType(),
3846 outputTensorInfo.GetQuantizationScale(),
3847 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003848 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
3849
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01003850 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01003851
3852 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
3853 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
3854 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
3855 }
Nina Drozd200e3802019-04-15 09:47:39 +01003856}
3857
Kevin May7d96b162021-02-03 17:38:41 +00003858void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd0324f482019-04-08 10:52:10 +01003859{
3860 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3861
Mike Kelly0d77ae12022-01-07 17:42:27 +00003862 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3863 const auto* options = operatorPtr->builtin_options.AsSplitOptions();
Nina Drozd0324f482019-04-08 10:52:10 +01003864
3865 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
3866
Nina Drozd200e3802019-04-15 09:47:39 +01003867 // If number of splits cannot be inferred and is zero, throw ParseException.
3868 if(numSplits == 0)
3869 {
3870 throw ParseException("Number to splits must greater than zero.");
3871 }
3872
Nina Drozd0324f482019-04-08 10:52:10 +01003873 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3874 CHECK_VALID_SIZE(inputs.size(), 2);
3875 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3876 CHECK_VALID_SIZE(outputs.size(), numSplits);
3877
Mike Kelly377fb212023-01-10 15:55:28 +00003878 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
3879 armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003880 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
Nina Drozd0324f482019-04-08 10:52:10 +01003881
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003882 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003883 if (axisBufferPtr == nullptr)
3884 {
3885 throw ParseException(
3886 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
3887 CHECK_LOCATION().AsString()));
3888 }
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003889
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003890 std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
3891 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
3892 int32_t axis = axisData[0];
3893
3894 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
3895 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
3896 {
3897 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
3898 // E.g. Rank 4 tensor can have axis in range [-4, 3)
3899 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
3900 throw ParseException(
3901 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
3902 axis,
3903 CHECK_LOCATION().AsString()));
3904 }
3905
3906 const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
Nina Drozd0324f482019-04-08 10:52:10 +01003907
Nina Drozd0324f482019-04-08 10:52:10 +01003908 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003909 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01003910 {
3911 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003912 fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
3913 inputTensorInfo.GetNumDimensions(),
3914 MaxNumOfTensorDimensions,
3915 CHECK_LOCATION().AsString()));
Nina Drozd0324f482019-04-08 10:52:10 +01003916 }
3917
3918 std::vector<unsigned int> splitterDimSizes(inputDimSize);
3919
3920 // Add current input shape to splitterDimSizes
3921 for (unsigned int i = 0; i < inputDimSize; ++i)
3922 {
3923 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
3924 }
3925
3926 if (splitterDimSizes[splitDim] % numSplits != 0)
3927 {
3928 throw ParseException("Number of splits must evenly divide the dimension");
3929 }
3930 splitterDimSizes[splitDim] /= numSplits;
3931
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003932 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01003933 for (unsigned int j = 0; j < numSplits; ++j)
3934 {
3935 // Set the size of the views.
3936 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
3937 {
3938 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
3939 }
3940 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
3941 }
3942
James Ward58dec6b2020-09-11 17:32:44 +01003943 auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd0324f482019-04-08 10:52:10 +01003944 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003945 ARMNN_ASSERT(layer != nullptr);
Nina Drozd0324f482019-04-08 10:52:10 +01003946
3947 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003948 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01003949
Nina Drozd0324f482019-04-08 10:52:10 +01003950 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3951 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003952 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01003953 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01003954 }
3955
3956 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3957 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3958}
3959
Derek Lambertif0176992020-04-28 13:37:49 +01003960unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
3961{
3962 int numDims = armnn::numeric_cast<int>(numDimsIn);
3963 int v = idx < 0 ? numDims + idx : idx;
3964 ARMNN_ASSERT(v >= 0);
3965 ARMNN_ASSERT(v < numDims);
3966
3967 return static_cast<unsigned int>(v);
3968}
3969
Kevin May7d96b162021-02-03 17:38:41 +00003970void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01003971{
3972 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3973
Mike Kelly0d77ae12022-01-07 17:42:27 +00003974 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3975 const auto* options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01003976
3977 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3978 CHECK_VALID_SIZE(inputs.size(), 3);
3979
3980 auto& inputTensor = inputs[0];
3981 auto& splitsTensor = inputs[1];
3982 auto& axisTensor = inputs[2];
3983
3984 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
3985 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
3986 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
3987 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
3988
3989 // Inputs
3990 auto inputDimSize = inputTensorInfo.GetNumDimensions();
3991 if (inputDimSize > MaxNumOfTensorDimensions)
3992 {
3993 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003994 fmt::format("The number of dimensions: {} for input tensors of the "
3995 "SplitV op cannot be greater than {} {}",
3996 inputTensorInfo.GetNumDimensions(),
3997 MaxNumOfTensorDimensions,
3998 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01003999 }
4000
4001 // Get split axis
4002 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004003 if (axisBufferPtr == nullptr)
4004 {
4005 throw ParseException(
4006 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
4007 CHECK_LOCATION().AsString()));
4008 }
4009
Derek Lambertif0176992020-04-28 13:37:49 +01004010 std::vector<int> axisData(axisTensorInfo.GetNumElements());
4011 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004012 int32_t axis = axisData[0];
4013
4014 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
4015 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
4016 {
4017 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
4018 // E.g. Rank 4 tensor can have axis in range [-4, 3)
4019 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
4020 throw ParseException(
4021 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
4022 axis,
4023 CHECK_LOCATION().AsString()));
4024 }
4025 const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
Derek Lambertif0176992020-04-28 13:37:49 +01004026
Derek Lambertif0176992020-04-28 13:37:49 +01004027 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01004028 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01004029 unsigned int numSplits{0};
4030
4031 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01004032 {
4033 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01004034 }
4035 else
4036 {
Ryan OShea86704732020-05-26 11:41:04 +01004037 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01004038 }
4039
4040 if (numSplits <=0)
4041 {
4042 throw ParseException("SplitV has invalid number of splits");
4043 }
4044
Jan Eilersc0761e92020-06-29 16:48:44 +01004045 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01004046 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01004047 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01004048
Jan Eilersc0761e92020-06-29 16:48:44 +01004049 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01004050 int numInferred{0};
4051 unsigned int inferIdx{0};
4052 int splitSum{0};
4053 for (auto split : splitsData)
4054 {
4055 if (split < 0)
4056 {
4057 numInferred++;
4058 inferIdx = idx;
4059 }
4060 else
4061 {
4062 splitSum += split;
4063 }
4064 idx++;
4065 }
4066 // Check for inferred Axis
4067 if (numInferred == 0)
4068 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004069 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01004070 {
4071 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
4072 }
4073 }
4074 else if (numInferred == 1)
4075 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004076 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01004077 }
4078 else
4079 {
4080 throw ParseException("Cannot infer split size for more than one split");
4081 }
4082
Derek Lambertif0176992020-04-28 13:37:49 +01004083 //Ouput size validation
4084 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4085 CHECK_VALID_SIZE(outputs.size(), numSplits);
4086
4087 // Setup Armnn descriptor
4088 SplitterDescriptor splitDesc(numSplits, inputDimSize);
4089 unsigned int accumSplit = 0;
4090 for (unsigned int j = 0; j < numSplits; ++j)
4091 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01004092 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01004093
4094 // Set the size of the views.
4095 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
4096 {
4097 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
4098 if (dimIdx == splitDim)
4099 {
4100 dimSize = splitSize;
4101 }
4102 splitDesc.SetViewSize(j, dimIdx, dimSize);
4103 }
4104
4105 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
4106 accumSplit += splitSize;
4107 }
4108
James Ward58dec6b2020-09-11 17:32:44 +01004109 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01004110 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01004111 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01004112
4113 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4114 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4115
4116 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
4117 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01004118 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01004119 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
4120 }
4121
4122 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4123 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4124}
4125
Matthew Sloyan28f177c2021-04-09 14:38:52 +01004126void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
4127{
4128 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
4129}
4130
Kevin May7d96b162021-02-03 17:38:41 +00004131void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
Inki Daed4619e22020-09-10 15:33:54 +09004132{
Matthew Sloyan28f177c2021-04-09 14:38:52 +01004133 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
4134}
4135
/// Common handler for the TFLite ARG_MIN / ARG_MAX operators: adds an ArgMinMax
/// layer to the network being built.
/// @param subgraphIndex     index of the subgraph containing the operator
/// @param operatorIndex     index of the operator within that subgraph
/// @param argMinMaxFunction selects Min or Max behaviour for the layer
/// @throws ParseException if the output type is not Signed32/Signed64, the axis
///         buffer cannot be read, or the axis is outside [-rank, rank).
void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    // Two inputs: the data tensor and a scalar axis tensor.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1); // axis must be a single value

    // Check if output tensor type is Signed32 or Signed64
    if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
        outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
    {
        throw ParseException(
            fmt::format(
                "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
                CHECK_LOCATION().AsString()));
    }

    // Get const axis value from model and set it to descriptor.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    if (axisBufferPtr == nullptr)
    {
        // The axis must be a constant tensor; a dynamic axis is not supported.
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData.front(); // scalar, so the first element is the axis

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    ArgMinMaxDescriptor desc;
    desc.m_Axis = axis;
    desc.m_Function = argMinMaxFunction;

    // Register a ArgMin/ArgMax layer.
    auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
    auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
    IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);
    // Recompute the output info from the connected inputs (handles dynamic shapes).
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
4205
/// Parses a TFLite GATHER operator and adds a Gather layer to the network.
/// Inputs: params tensor (0) and indices tensor (1); one output.
/// @throws ParseException if the axis is outside [-rank, rank) or the output
///         rank does not equal inputRank + indicesRank - 1.
void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);

    armnn::GatherDescriptor gatherDescriptor;

    // The gather axis comes from the operator's builtin options.
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsGatherOptions();
    auto axis = options->axis;

    auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
    auto outputDimensions = outputTensorInfo.GetNumDimensions();
    // Negative axis values count back from the last dimension.
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        throw ParseException(
            fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
                        axis,
                        inputDimensions, inputDimensions,
                        CHECK_LOCATION().AsString()));
    }
    // Gather output rank must be inputRank + indicesRank - 1.
    if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
    {
        throw ParseException(
            fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
                        outputDimensions,
                        inputDimensions, indicesDimensions,
                        CHECK_LOCATION().AsString()));
    }

    gatherDescriptor.m_Axis = axis;

    IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    // Recompute the output info from both connected inputs (handles dynamic shapes).
    outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
4260
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004261void TfLiteParserImpl::ParseGatherNd(size_t subgraphIndex, size_t operatorIndex)
4262{
4263 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4264
4265 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4266 CHECK_VALID_SIZE(inputs.size(), 2);
4267 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4268 CHECK_VALID_SIZE(outputs.size(), 1);
4269
Mike Kelly377fb212023-01-10 15:55:28 +00004270 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4271 armnn::TensorInfo indicesTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004272
4273 auto layerName = fmt::format("GatherNd:{}:{}", subgraphIndex, operatorIndex);
4274 IConnectableLayer* layer = m_Network->AddGatherNdLayer(layerName.c_str());
4275 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00004276 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Teresa Charlin91a53ea2022-04-25 15:47:29 +01004277 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4278
4279 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4280 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
4281
4282 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4283 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4284}
4285
Kevin May7d96b162021-02-03 17:38:41 +00004286void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00004287{
4288 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4289
Kevin May7d96b162021-02-03 17:38:41 +00004290 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004291 CHECK_VALID_SIZE(inputs.size(), 1);
Kevin May7d96b162021-02-03 17:38:41 +00004292 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00004293 CHECK_VALID_SIZE(outputs.size(), 1);
4294
4295 armnn::DepthToSpaceDescriptor descriptor;
4296
Mike Kelly0d77ae12022-01-07 17:42:27 +00004297 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4298 const auto* options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
Sadik Armagan26868492021-01-22 14:25:31 +00004299 auto blockSize = options->block_size;
4300 if (blockSize < 2)
4301 {
4302 throw ParseException(
4303 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
4304 blockSize,
4305 CHECK_LOCATION().AsString()));
4306 }
4307 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
4308
4309 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
4310 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
4311 ARMNN_ASSERT(layer != nullptr);
Mike Kelly377fb212023-01-10 15:55:28 +00004312 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Sadik Armagan26868492021-01-22 14:25:31 +00004313 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4314
4315 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4316 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4317
4318 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4319 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4320}
4321
Kevin May7d96b162021-02-03 17:38:41 +00004322void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00004323{
Sadik Armagana2747482021-02-09 10:28:54 +00004324 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
4325}
4326
Teresa Charlin4e3e8312021-08-05 12:34:37 +01004327void TfLiteParserImpl::ParseReduceProd(size_t subgraphIndex, size_t operatorIndex)
4328{
4329 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Prod);
4330}
4331
Sadik Armagana2747482021-02-09 10:28:54 +00004332void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
4333{
4334 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
4335}
4336
4337void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
4338{
4339 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
4340}
4341
/// Common handler for the TFLite reduce operators (SUM, REDUCE_PROD, REDUCE_MAX,
/// REDUCE_MIN): adds a Reduce layer configured with the given operation.
/// Inputs: data tensor (0) and axis tensor (1); one output.
/// If the axis tensor has no constant buffer, all dimensions are reduced.
void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsReducerOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo0 = InputTensorInfo(subgraphIndex, operatorIndex, 0);
    armnn::TensorInfo inputTensorInfo1 = InputTensorInfo(subgraphIndex, operatorIndex, 1);

    ReduceDescriptor desc;
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    // Get const axis value from model and set it to descriptor.
    if (axisBufferPtr != nullptr)
    {
        std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
        ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());

        // Convert the axis to unsigned int and remove duplicates.
        // (i + rank) % rank maps negative axes to their positive equivalents.
        auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
        std::set<unsigned int> uniqueAxis;
        std::transform(axisData.begin(),
                       axisData.end(),
                       std::inserter(uniqueAxis, uniqueAxis.begin()),
                       [rank](int i)->unsigned int{
                           return static_cast<uint32_t>(((i + rank) % rank)); });
        desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
    }
    else
    {
        // No constant axis data: reduce over every dimension of the input.
        for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
        {
            desc.m_vAxis.push_back(i);
        }
    }

    desc.m_KeepDims = options->keep_dims;
    desc.m_ReduceOperation = reduceOperation;

    // Register a new Reduce layer configured for the requested operation.
    IConnectableLayer* layer = m_Network->AddReduceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
4403
Mike Kelly31dce2b2021-09-01 21:22:37 +01004404void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex)
4405{
4406 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4407
4408 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4409 CHECK_VALID_SIZE(inputs.size(), 1);
4410
4411 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4412 CHECK_VALID_SIZE(outputs.size(), 1);
4413
4414 auto layerName = fmt::format("LRN:{}:{}", subgraphIndex, operatorIndex);
4415 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4416
Mike Kelly377fb212023-01-10 15:55:28 +00004417 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
Mike Kelly31dce2b2021-09-01 21:22:37 +01004418
4419 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
4420 const auto* options = operatorPtr->builtin_options.AsLocalResponseNormalizationOptions();
4421
4422 armnn::NormalizationDescriptor descriptor;
4423 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4424 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
4425 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
4426 descriptor.m_NormSize = static_cast<uint32_t>(options->radius);
4427 descriptor.m_K = options->bias;
4428 descriptor.m_Alpha = options->alpha;
4429 descriptor.m_Beta = options->beta;
4430
4431 // ArmNN expects normSize to be the full size of the normalization
4432 // window rather than the radius as in TfLite.
4433 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
4434
4435 IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
4436 ARMNN_ASSERT(layer != nullptr);
4437
Mike Kelly377fb212023-01-10 15:55:28 +00004438 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Mike Kelly31dce2b2021-09-01 21:22:37 +01004439 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4440
4441 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4442 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4443
4444 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4445 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4446}
4447
Teresa Charlin28aa6692022-07-12 11:18:44 +01004448void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
4449{
4450 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
4451}
4452
4453void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
4454{
4455 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
4456}
4457
4458void TfLiteParserImpl::ParseLog(size_t subgraphIndex, size_t operatorIndex)
4459{
4460 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Log);
4461}
4462
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004463void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
4464{
4465 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
4466}
4467
4468void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
4469{
4470 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
4471}
4472
4473void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
4474{
4475 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
4476}
4477
Teresa Charlin28aa6692022-07-12 11:18:44 +01004478void TfLiteParserImpl::ParseSin(size_t subgraphIndex, size_t operatorIndex)
4479{
4480 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sin);
4481}
4482
Teresa Charlinf0fce5b2022-05-04 17:24:43 +01004483void TfLiteParserImpl::ParseSqrt(size_t subgraphIndex, size_t operatorIndex)
4484{
4485 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Sqrt);
4486}
4487
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004488void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
4489{
4490 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4491
4492 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4493 CHECK_VALID_SIZE(inputs.size(), 1);
4494
4495 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4496 CHECK_VALID_SIZE(outputs.size(), 1);
4497
4498 std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
4499 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4500
4501 ElementwiseUnaryDescriptor desc;
4502 desc.m_Operation = unaryOperation;
4503 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
4504 ARMNN_ASSERT(layer != nullptr);
4505
Mike Kelly377fb212023-01-10 15:55:28 +00004506 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
Matthew Sloyaned7fce42021-04-15 20:46:24 +01004507 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4508
4509 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4510 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
4511
4512 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4513 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
4514}
4515
Bruno Goncalves2d0eb862021-07-11 14:10:15 -03004516void TfLiteParserImpl::ParseEqual(size_t subgraphIndex, size_t operatorIndex)
4517{
4518 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Equal);
4519}
4520
4521void TfLiteParserImpl::ParseNotEqual(size_t subgraphIndex, size_t operatorIndex)
4522{
4523 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::NotEqual);
4524}
4525
4526void TfLiteParserImpl::ParseGreater(size_t subgraphIndex, size_t operatorIndex)
4527{
4528 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Greater);
4529}
4530
4531void TfLiteParserImpl::ParseGreaterOrEqual(size_t subgraphIndex, size_t operatorIndex)
4532{
4533 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::GreaterOrEqual);
4534}
4535
4536void TfLiteParserImpl::ParseLess(size_t subgraphIndex, size_t operatorIndex)
4537{
4538 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Less);
4539}
4540
4541void TfLiteParserImpl::ParseLessOrEqual(size_t subgraphIndex, size_t operatorIndex)
4542{
4543 ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::LessOrEqual);
4544}
4545
4546void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorIndex,
4547 ComparisonOperation comparisonOperation)
4548{
4549 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
4550
4551 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
4552 CHECK_VALID_SIZE(inputs.size(), 2);
4553
4554 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
4555 CHECK_VALID_SIZE(outputs.size(), 1);
4556
4557 auto layerName = std::string(GetComparisonOperationAsCString(comparisonOperation)) + ":{}:{}";
4558 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
4559
Mike Kelly377fb212023-01-10 15:55:28 +00004560 armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
4561 armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
Bruno Goncalves2d0eb862021-07-11 14:10:15 -03004562 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerNameFormatted, "Input 0", "Input 1");
4563
4564 ComparisonDescriptor desc;
4565 desc.m_Operation = comparisonOperation;
4566 IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());
4567 ARMNN_ASSERT(layer != nullptr);
4568
Mike Kelly377fb212023-01-10 15:55:28 +00004569 TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
Bruno Goncalves2d0eb862021-07-11 14:10:15 -03004570 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
4571
4572 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
4573 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
4574
4575 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
4576 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
4577}
4578
Mike Kelly04d82292023-01-19 18:29:40 +00004579armnn::IConnectableLayer* TfLiteParserImpl::AddReshapeLayer(armnn::IConnectableLayer* layer,
4580 unsigned int outputSlot,
4581 std::string reshapeLayerName,
4582 armnn::TensorInfo outputShape)
4583{
4584 ReshapeDescriptor desc;
4585 desc.m_TargetShape = outputShape.GetShape();
4586
4587 IConnectableLayer* reshapeLayer =
4588 m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
4589
4590 auto & prevOutputSlot = layer->GetOutputSlot(outputSlot);
4591 prevOutputSlot.Connect(reshapeLayer->GetInputSlot(0));
4592 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputShape);
4593 return reshapeLayer;
4594}
4595
Kevin May7d96b162021-02-03 17:38:41 +00004596armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
4597 unsigned int outputSlot,
4598 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01004599{
4600 ActivationDescriptor activationDesc;
4601 std::string layerName = prevLayer->GetName();
4602
4603 switch(activationType)
4604 {
4605 case tflite::ActivationFunctionType_NONE:
4606 {
4607 // this is a no-op: return previous layer
4608 return prevLayer;
4609 }
4610 case tflite::ActivationFunctionType_RELU:
4611 {
4612 activationDesc.m_Function = ActivationFunction::ReLu;
4613 layerName += ":RELU";
4614 break;
4615 }
4616 case tflite::ActivationFunctionType_RELU6:
4617 {
4618 activationDesc.m_Function = ActivationFunction::BoundedReLu;
4619 activationDesc.m_A = 6.0f;
4620 activationDesc.m_B = 0.0f;
4621 layerName += ":RELU6";
4622 break;
4623 }
4624 case tflite::ActivationFunctionType_TANH:
4625 {
4626 activationDesc.m_Function = ActivationFunction::TanH;
4627 activationDesc.m_A = 1.0f;
4628 activationDesc.m_B = 1.0f;
4629 layerName += ":TANH";
4630 break;
4631 }
4632
4633 // I only put these here as a reminder what others we could support
4634 case tflite::ActivationFunctionType_RELU_N1_TO_1:
4635 case tflite::ActivationFunctionType_SIGN_BIT:
4636 default:
4637 {
4638 throw ParseException(
Mike Kelly377fb212023-01-10 15:55:28 +00004639 fmt::format("TfLite parser doesn't support fused activation: "
James Ward58dec6b2020-09-11 17:32:44 +01004640 "{}/{} {} ",
4641 activationType,
4642 tflite::EnumNameActivationFunctionType(activationType),
4643 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004644
4645 }
4646 }
4647
4648 IConnectableLayer* activationLayer =
4649 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
4650
4651 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
4652 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
4653 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
4654 return activationLayer;
4655}
4656
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004657armnn::IConnectableLayer* TfLiteParserImpl::AddFusedFloorLayer(armnn::IConnectableLayer* prevLayer,
4658 unsigned int outputSlot)
4659{
Teresa Charlin725728e2022-05-05 13:33:33 +01004660
4661 auto& prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
4662 DataType dataType = prevOutputSlot.GetTensorInfo().GetDataType();
4663
4664 if (dataType == DataType::Signed32)
4665 {
4666 return prevLayer;
4667 }
4668
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004669 std::string layerName = prevLayer->GetName();
4670 IConnectableLayer* floorLayer = m_Network->AddFloorLayer(layerName.c_str());
4671
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004672 prevOutputSlot.Connect(floorLayer->GetInputSlot(0));
4673 floorLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
Teresa Charlin725728e2022-05-05 13:33:33 +01004674
Teresa Charlincdbd40b2022-02-25 13:21:55 +00004675 return floorLayer;
4676}
4677
Mike Kelly0d77ae12022-01-07 17:42:27 +00004678TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char* fileName)
telsoa01c577f2c2018-08-31 09:22:23 +01004679{
4680 if (fileName == nullptr)
4681 {
James Ward58dec6b2020-09-11 17:32:44 +01004682 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01004683 CHECK_LOCATION().AsString()));
4684 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01004685 std::error_code errorCode;
4686 fs::path pathToFile(fileName);
4687 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01004688 {
James Ward58dec6b2020-09-11 17:32:44 +01004689 //fmt::format() could not be used here (format error)
4690 std::stringstream msg;
4691 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
4692 << " " << CHECK_LOCATION().AsString();
4693
4694 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01004695 }
4696 std::ifstream file(fileName, std::ios::binary);
4697 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
4698 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
4699 fileContent.size());
4700}
4701
Mike Kelly0d77ae12022-01-07 17:42:27 +00004702TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t* binaryContent, size_t len)
telsoa01c577f2c2018-08-31 09:22:23 +01004703{
4704 if (binaryContent == nullptr)
4705 {
James Ward58dec6b2020-09-11 17:32:44 +01004706 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01004707 CHECK_LOCATION().AsString()));
4708 }
4709 flatbuffers::Verifier verifier(binaryContent, len);
4710 if (verifier.VerifyBuffer<tflite::Model>() == false)
4711 {
4712 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004713 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
4714 "flatbuffers format. size:{} {}",
4715 len,
4716 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004717 }
4718 return tflite::UnPackModel(binaryContent);
4719}
4720
Mike Kelly0d77ae12022-01-07 17:42:27 +00004721TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004722 size_t subgraphIndex,
4723 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004724{
4725 CHECK_MODEL(model, subgraphIndex, operatorIndex);
4726
Mike Kelly0d77ae12022-01-07 17:42:27 +00004727 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4728 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004729
4730 size_t inputCount = operatorPtr->inputs.size();
mathad01c21025d2021-04-26 10:09:37 +01004731 TensorRawPtrVector result;
Mike Kelly0d77ae12022-01-07 17:42:27 +00004732 for (size_t i = 0; i < inputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004733 {
mathad01c21025d2021-04-26 10:09:37 +01004734 // If the input location is -1 then assume input is turned off.
4735 if (operatorPtr->inputs[i] == -1)
4736 {
4737 continue;
4738 }
4739 else
4740 {
4741 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
4742 result.push_back(subgraphPtr->tensors[inputId].get());
4743 }
telsoa01c577f2c2018-08-31 09:22:23 +01004744 }
4745 return result;
4746}
4747
Mike Kelly0d77ae12022-01-07 17:42:27 +00004748TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004749 size_t subgraphIndex,
4750 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004751{
4752 CHECK_MODEL(model, subgraphIndex, operatorIndex);
4753
Mike Kelly0d77ae12022-01-07 17:42:27 +00004754 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4755 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004756
4757 size_t outputCount = operatorPtr->outputs.size();
4758 TensorRawPtrVector result(outputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004759 for (size_t i = 0; i < outputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004760 {
4761 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
4762 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01004763 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01004764 }
4765 return result;
4766}
4767
Mike Kelly0d77ae12022-01-07 17:42:27 +00004768TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004769 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004770{
4771 CHECK_SUBGRAPH(model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004772 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004773
Derek Lambertiff05cc52019-04-26 13:05:17 +01004774 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01004775 TensorIdRawPtrVector result(inputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004776 for (size_t i = 0; i < inputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004777 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01004778 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01004779 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01004780 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01004781 }
4782 return result;
4783}
4784
Mike Kelly0d77ae12022-01-07 17:42:27 +00004785TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr& model,
Kevin May7d96b162021-02-03 17:38:41 +00004786 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004787{
4788 CHECK_SUBGRAPH(model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004789 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004790
Derek Lambertiff05cc52019-04-26 13:05:17 +01004791 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01004792 TensorIdRawPtrVector result(outputCount);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004793 for (size_t i = 0; i < outputCount; ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004794 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01004795 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
4796 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01004797 }
4798 return result;
4799}
4800
Kevin May7d96b162021-02-03 17:38:41 +00004801std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
4802 size_t subgraphIndex,
4803 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004804{
4805 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004806 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4807 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004808 return operatorPtr->inputs;
4809}
4810
Kevin May7d96b162021-02-03 17:38:41 +00004811std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
4812 size_t subgraphIndex,
4813 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004814{
4815 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004816 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4817 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01004818 return operatorPtr->outputs;
4819}
4820
Kevin May7d96b162021-02-03 17:38:41 +00004821void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
4822 size_t operatorIndex,
4823 IConnectableLayer* layer,
Finn Williamsd4fa5452021-03-01 12:31:41 +00004824 const std::vector<unsigned int>& tensorIndexes,
4825 unsigned int startingSlotIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004826{
4827 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01004828 ARMNN_ASSERT(layer != nullptr);
Matthew Sloyan81beae32021-07-13 19:46:11 +01004829
Finn Williamsd4fa5452021-03-01 12:31:41 +00004830 if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
telsoa01c577f2c2018-08-31 09:22:23 +01004831 {
4832 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004833 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
4834 " for subgraph:{} operator index:{} {}",
4835 tensorIndexes.size(),
4836 layer->GetNumInputSlots(),
4837 subgraphIndex,
4838 operatorIndex,
4839 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004840 }
4841
Finn Williamsd4fa5452021-03-01 12:31:41 +00004842 for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
telsoa01c577f2c2018-08-31 09:22:23 +01004843 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00004844 unsigned int tensorIndex = tensorIndexes[index];
4845 armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
telsoa01c577f2c2018-08-31 09:22:23 +01004846 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
4847 }
4848}
4849
Kevin May7d96b162021-02-03 17:38:41 +00004850void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
4851 size_t operatorIndex,
4852 IConnectableLayer* layer,
4853 const std::vector<unsigned int>& tensorIndexes)
telsoa01c577f2c2018-08-31 09:22:23 +01004854{
4855 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01004856 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01004857 if (tensorIndexes.size() != layer->GetNumOutputSlots())
4858 {
4859 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004860 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
4861 " for subgraph:{} operator index:{} {}",
4862 tensorIndexes.size(),
4863 layer->GetNumOutputSlots(),
4864 subgraphIndex,
4865 operatorIndex,
4866 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004867 }
4868
4869 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
4870 {
4871 unsigned int tensorIndex = tensorIndexes[slotIndex];
4872 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
4873 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
4874 }
4875}
4876
Mike Kelly377fb212023-01-10 15:55:28 +00004877void TfLiteParserImpl::SetupInputLayerTensorInfos(size_t subgraphIndex)
4878{
4879 CHECK_SUBGRAPH(m_Model, subgraphIndex);
4880
4881 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
4882 for (auto const& tensorIdAndPtr : inputs)
4883 {
4884 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
4885 m_TensorInfos.insert({tensorIdAndPtr.first, tensorInfo});
4886 }
4887}
4888
Kevin May7d96b162021-02-03 17:38:41 +00004889void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004890{
4891 CHECK_SUBGRAPH(m_Model, subgraphIndex);
4892
4893 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004894 for (auto const& tensorIdAndPtr : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01004895 {
4896 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
4897 IConnectableLayer* layer =
4898 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
4899
4900 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
4901 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
4902
4903 RegisterOutputSlots(subgraphIndex,
4904 VIRTUAL_OPERATOR_ID,
4905 layer,
4906 { static_cast<uint32_t>(tensorIdAndPtr.first) });
4907 }
4908}
4909
Kevin May7d96b162021-02-03 17:38:41 +00004910void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01004911{
4912 CHECK_SUBGRAPH(m_Model, subgraphIndex);
4913
4914 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
Mike Kelly0d77ae12022-01-07 17:42:27 +00004915 for (auto const& tensorIdAndPtr : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01004916 {
4917 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
4918 IConnectableLayer* layer =
4919 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
4920
4921 RegisterInputSlots(subgraphIndex,
4922 VIRTUAL_OPERATOR_ID,
4923 layer,
4924 { static_cast<uint32_t>(tensorIdAndPtr.first) });
4925 }
4926}
4927
void TfLiteParserImpl::SetupConstantLayerTensorInfos(size_t subgraph)
{
    // Records a TensorInfo for every tensor that is consumed by at least one
    // layer but has no registered producer (i.e. a candidate constant tensor),
    // so that later lookups in m_TensorInfos succeed.
    CHECK_SUBGRAPH(m_Model, subgraph);

    const auto & subgraphPtr = m_Model->subgraphs[subgraph];
    // NOTE(review): the outer loop walks the connections of every subgraph while
    // tensors are read from the 'subgraph' parameter's tensor list — presumably
    // only one subgraph is parsed at a time; confirm against the call sites.
    for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // No producing output slot but at least one consuming input slot:
            // the tensor's data must come from the model itself.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
                m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
            {
                TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();

                armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);

                m_TensorInfos.insert({tensorIndex, tensorInfo});
            }
        }
    }
}
4949
void TfLiteParserImpl::SetupConstantLayers(size_t subgraph)
{
    // Creates a ConstantLayer for every tensor that is consumed by some layer
    // but produced by none: either from the data stored in the model's buffer,
    // or (when explicitly queued) as a zero-initialised placeholder.
    CHECK_SUBGRAPH(m_Model, subgraph);

    const auto & subgraphPtr = m_Model->subgraphs[subgraph];
    // NOTE(review): the loop walks all recorded subgraph connections while
    // tensors are taken from the 'subgraph' parameter's tensor list —
    // presumably only one subgraph is parsed at a time; verify at call sites.
    for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // A tensor with consumers but no producer must be a constant.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
                m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
            {
                TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();

                if (IsConstTensor(tensorPtr))
                {
                    armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
                    armnn::DataType dataType = tensorInfo.GetDataType();

                    // Buffers queued for dequantisation are materialised as Float32.
                    if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
                        != m_ConstantsToDequantize.end())
                    {
                        dataType = DataType::Float32;
                    }
                    auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo, dataType);

                    std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
                    IConnectableLayer *layer = m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

                    layer->GetOutputSlot(0).SetTensorInfo(tensorAndData.first.GetInfo());
                    RegisterOutputSlots(subgraphIndex,
                                        VIRTUAL_OPERATOR_ID,
                                        layer,
                                        { tensorIndex });
                }
                else if (ShouldConstantTensorBeCreated(tensorIndex))
                {
                    // No data in the model: fabricate a zero-filled constant of
                    // the right size (the vector below is value-initialised).
                    armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
                    armnn::DataType dataType = tensorInfo.GetDataType();

                    if (std::find(m_ConstantsToDequantize.begin(), m_ConstantsToDequantize.end(), tensorPtr->buffer)
                        != m_ConstantsToDequantize.end())
                    {
                        dataType = DataType::Float32;
                    }
                    // Make sure isConstant flag is set.
                    tensorInfo.SetConstant();
                    tensorInfo.SetDataType(dataType);

                    // NOTE(review): the backing std::vector is a temporary that
                    // dies at the end of this statement; AddConstantLayer below
                    // reads the ConstTensor's memory afterwards — confirm
                    // ConstTensor/AddConstantLayer copy semantics make this safe.
                    auto tensorAndData = ConstTensor(tensorInfo, std::vector<uint8_t>(tensorInfo.GetNumBytes()));

                    std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
                    IConnectableLayer* layer = m_Network->AddConstantLayer(tensorAndData, layerName.c_str());

                    layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
                    RegisterOutputSlots(subgraphIndex,
                                        VIRTUAL_OPERATOR_ID,
                                        layer,
                                        {tensorIndex});
                }
                else
                {
                    throw ParseException(
                        fmt::format("Invalid Tensor: Tensor should be constant. {}",
                                    CHECK_LOCATION().AsString()));
                }
            }
        }
    }
}
5020
telsoa01c577f2c2018-08-31 09:22:23 +01005021// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Kevin May7d96b162021-02-03 17:38:41 +00005022TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01005023{
5024 CHECK_BUFFER(model, bufferIndex);
5025 return model->buffers[bufferIndex].get();
5026}
5027
Matteo Martincigh747ef822018-12-18 09:26:39 +00005028template<typename T>
Kevin May7d96b162021-02-03 17:38:41 +00005029std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
5030TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
5031 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00005032 armnn::TensorInfo& tensorInfo,
5033 armnn::Optional<armnn::PermutationVector&> permutationVector)
5034{
Matthew Sloyan81beae32021-07-13 19:46:11 +01005035 // Make sure isConstant flag is set.
5036 tensorInfo.SetConstant();
5037
Matteo Martincigh747ef822018-12-18 09:26:39 +00005038 auto constData = CreateConstTensorImpl<T>(bufferPtr,
5039 tensorPtr,
5040 tensorInfo,
5041 permutationVector);
Kevin May7d96b162021-02-03 17:38:41 +00005042 TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
Matteo Martincigh747ef822018-12-18 09:26:39 +00005043 return std::make_pair(constData.first, std::move(storage));
5044}
5045
Mike Kelly5880b912022-01-28 16:18:54 +00005046bool TfLiteParserImpl::ShouldConstantTensorBeCreated(unsigned int tensorIndex)
5047{
5048 // If the TensorIndex appears in the list of ConstantsToBeCreated then return true
5049 return (std::find(m_ConstantsToBeCreated.begin(), m_ConstantsToBeCreated.end(), tensorIndex)
5050 != m_ConstantsToBeCreated.end());
5051}
5052
Finn Williamsd4fa5452021-03-01 12:31:41 +00005053bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
5054{
5055 CHECK_TENSOR_PTR(tensorPtr);
mathad01bf7edb62021-04-20 16:12:45 +01005056 bool isConst = true;
5057
5058 auto buffer = GetBuffer(m_Model, tensorPtr->buffer);
5059 if (buffer->data.size() == 0)
5060 {
5061 isConst = false;
5062 }
5063
5064 return isConst;
Finn Williamsd4fa5452021-03-01 12:31:41 +00005065}
5066
Kevin May7d96b162021-02-03 17:38:41 +00005067std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
Finn Williamsd4fa5452021-03-01 12:31:41 +00005068TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
5069 armnn::TensorInfo& tensorInfo,
5070 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01005071{
5072 CHECK_TENSOR_PTR(tensorPtr);
5073 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5074 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5075
Matthew Sloyan81beae32021-07-13 19:46:11 +01005076 // Make sure isConstant flag is set.
5077 tensorInfo.SetConstant();
5078
telsoa01c577f2c2018-08-31 09:22:23 +01005079 switch (tensorInfo.GetDataType())
5080 {
5081 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00005082 return CreateConstTensorAndStoreData<float>(bufferPtr,
5083 tensorPtr,
5084 tensorInfo,
5085 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00005086 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00005087 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
5088 tensorPtr,
5089 tensorInfo,
5090 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00005091 case armnn::DataType::QSymmS8:
5092 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
5093 tensorPtr,
5094 tensorInfo,
5095 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00005096 case armnn::DataType::QAsymmS8:
5097 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
5098 tensorPtr,
5099 tensorInfo,
5100 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01005101 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00005102 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
5103 tensorPtr,
5104 tensorInfo,
5105 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01005106 default:
5107 {
5108 std::stringstream errString;
5109 errString << "Unexpected datatype when creating const tensor: "
5110 << armnn::GetDataTypeName(tensorInfo.GetDataType())
5111 << " shape:" << tensorInfo.GetShape()
5112 << CHECK_LOCATION().AsString();
5113 throw ParseException(errString.str());
5114 }
5115 }
5116}
5117
Finn Williamsd4fa5452021-03-01 12:31:41 +00005118armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5119 armnn::TensorInfo& tensorInfo)
5120{
5121 CHECK_TENSOR_PTR(tensorPtr);
5122 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5123 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5124
Matthew Sloyan81beae32021-07-13 19:46:11 +01005125 // Make sure isConstant flag is set.
5126 tensorInfo.SetConstant();
5127
Finn Williamsd4fa5452021-03-01 12:31:41 +00005128 return ConstTensor(tensorInfo, bufferPtr->data.data());
5129}
5130
Mike Kelly5880b912022-01-28 16:18:54 +00005131std::pair<armnn::ConstTensor, std::unique_ptr<float[]>>
5132TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
5133 armnn::TensorInfo& tensorInfo,
5134 armnn::DataType inputDataType)
5135{
5136 CHECK_TENSOR_PTR(tensorPtr);
5137 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5138 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5139
5140 // Make sure isConstant flag is set.
5141 tensorInfo.SetConstant();
5142
Mike Kelly0506ef02023-01-03 16:29:44 +00005143 if (inputDataType == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
Mike Kelly5880b912022-01-28 16:18:54 +00005144 {
Mike Kelly0506ef02023-01-03 16:29:44 +00005145 try
5146 {
5147 TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5148 std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5149 return std::make_pair(ConstTensor(constTensorInfo, data.get()), std::move(data));
5150 }
Cathal Corbett9c843c32023-01-09 17:51:37 +00005151 catch (InvalidArgumentException&)
Mike Kelly0506ef02023-01-03 16:29:44 +00005152 {
5153 throw ParseException(
5154 fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5155 GetDataTypeName(DataType::Float32),
5156 GetDataTypeName(tensorInfo.GetDataType()),
5157 CHECK_LOCATION().AsString()));
5158 }
Mike Kelly5880b912022-01-28 16:18:54 +00005159 }
5160 else
5161 {
5162 return std::make_pair(ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5163 }
5164}
5165
5166std::pair<armnn::ConstTensor*, std::unique_ptr<float[]>>
5167TfLiteParserImpl::CreateConstTensorPtr(TensorRawPtr tensorPtr, armnn::TensorInfo& inputTensorInfo)
5168{
5169 CHECK_TENSOR_PTR(tensorPtr);
5170 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
5171 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
5172 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
5173
5174 // Make sure isConstant flag is set.
5175 tensorInfo.SetConstant();
5176
5177 if (inputTensorInfo.GetDataType() == DataType::Float32 && tensorInfo.GetDataType() != DataType::Float32)
5178 {
Mike Kelly0506ef02023-01-03 16:29:44 +00005179 try
5180 {
5181 TensorInfo constTensorInfo(tensorInfo.GetShape(), DataType::Float32, 0.0f, 0, true);
5182 std::unique_ptr<float[]> data = armnnUtils::ToFloatArray(bufferPtr->data, tensorInfo);
5183 return std::make_pair(new ConstTensor(constTensorInfo, data.get()), std::move(data));
5184 }
Cathal Corbett9c843c32023-01-09 17:51:37 +00005185 catch (InvalidArgumentException&)
Mike Kelly0506ef02023-01-03 16:29:44 +00005186 {
5187 throw ParseException(
5188 fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
5189 GetDataTypeName(DataType::Float32),
5190 GetDataTypeName(tensorInfo.GetDataType()),
5191 CHECK_LOCATION().AsString()));
5192 }
Mike Kelly5880b912022-01-28 16:18:54 +00005193 }
5194 else
5195 {
5196 return std::make_pair(new ConstTensor(tensorInfo, bufferPtr->data.data()), std::unique_ptr<float[]>());
5197 }
5198}
5199
Kevin May7d96b162021-02-03 17:38:41 +00005200BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
5201 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01005202{
5203 CHECK_SUBGRAPH(m_Model, subgraphId);
5204 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
Mike Kelly0d77ae12022-01-07 17:42:27 +00005205 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005206 {
5207 if (input.second->name == name)
5208 {
5209 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
Colm Donelan4bc993b2021-11-09 20:39:10 +00005210 auto inputTensorInfo = ToTensorInfo(input.second);
5211 // Input tensors are always treated as constant tensors during network execution.
5212 inputTensorInfo.SetConstant(true);
5213 return std::make_pair(bindingId, inputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01005214 }
5215 }
5216
5217 std::stringstream bindings;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005218 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005219 {
5220 bindings << "'" << input.second->name << "' ";
5221 }
5222
5223 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005224 fmt::format("No input binding found for subgraph:{} and name:{}. "
5225 "Possible inputs are: [{}] {}",
5226 subgraphId,
5227 name,
5228 bindings.str(),
5229 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005230}
5231
Kevin May7d96b162021-02-03 17:38:41 +00005232BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
5233 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01005234{
5235 CHECK_SUBGRAPH(m_Model, subgraphId);
5236 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005237 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01005238 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005239 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01005240 if (output.second->name == name)
5241 {
5242 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Mike Kelly377fb212023-01-10 15:55:28 +00005243 std::vector<unsigned int> shape = m_OverriddenOutputShapes.size() > 0 ?
5244 m_OverriddenOutputShapes[i] : AsUnsignedVector(output.second->shape);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00005245 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01005246 }
5247 }
5248
5249 std::stringstream bindings;
Mike Kelly0d77ae12022-01-07 17:42:27 +00005250 for (auto const& output : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005251 {
5252 bindings << "'" << output.second->name << "' ";
5253 }
5254
5255 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01005256 fmt::format("No output binding found for subgraph:{} and name:{}. "
5257 "Possible outputs are: [{}] {}",
5258 subgraphId,
5259 name,
5260 bindings.str(),
5261 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01005262}
5263
size_t TfLiteParserImpl::GetSubgraphCount() const
{
    // Number of subgraphs contained in the loaded model.
    return m_Model->subgraphs.size();
}
5268
Kevin May7d96b162021-02-03 17:38:41 +00005269std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01005270{
5271 CHECK_SUBGRAPH(m_Model, subgraphId);
5272 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
5273 std::vector<std::string> result;
5274 result.reserve(inputs.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +00005275 for (auto const& input : inputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005276 {
5277 result.push_back(input.second->name);
5278 }
5279 return result;
5280}
5281
Kevin May7d96b162021-02-03 17:38:41 +00005282std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01005283{
5284 CHECK_SUBGRAPH(m_Model, subgraphId);
5285 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
5286 std::vector<std::string> result;
5287 result.reserve(outputs.size());
Mike Kelly0d77ae12022-01-07 17:42:27 +00005288 for (auto const& output : outputs)
telsoa01c577f2c2018-08-31 09:22:23 +01005289 {
5290 result.push_back(output.second->name);
5291 }
5292 return result;
5293}
5294
const std::string TfLiteParserImpl::GetVersion()
{
    // Version string of the TfLite parser library (from armnnTfLiteParser/Version.hpp).
    return TFLITE_PARSER_VERSION;
}
5299
// SupportedDataStorage keeps exactly one typed buffer alive so that a
// ConstTensor built over it remains valid. Each constructor takes ownership of
// one buffer type and leaves the remaining members null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]>&& data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}

TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}

TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}

TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]>&& data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
5331
5332} // armnnTfLiteParser