blob: 9b1fa9075ccd5e4e8ae44364292e75a2ead1bc8a [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kellyc5789ca2020-07-06 19:24:15 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfLiteParser/Version.hpp"
9
Sadik Armagand109a4d2020-07-28 10:42:13 +010010#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +000011#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010012#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000013#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010014#include <armnn/Tensor.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000015#include <armnnUtils/TensorUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010016#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010017#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000018#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010019#include <armnn/utility/NumericCast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010020
21// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000022#include <armnnUtils/Permute.hpp>
Francis Murtagh532a29d2020-06-29 11:50:01 +010023#include <Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000024
Sadik Armagan479045b2018-10-01 11:51:37 +010025#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026#include <VerificationHelpers.hpp>
27
28// The generated code based on the Tf Lite schema:
29#include <schema_generated.h>
30
Matteo Martincighe011d202019-11-28 11:35:47 +000031#include <flatbuffers/flexbuffers.h>
32
James Ward58dec6b2020-09-11 17:32:44 +010033#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010034
telsoa01c577f2c2018-08-31 09:22:23 +010035#include <algorithm>
Matthew Sloyanac001ee2021-02-03 10:43:04 +000036#include <fstream>
37#include <iostream>
telsoa01c577f2c2018-08-31 09:22:23 +010038#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010039#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000040#include <sstream>
41
// Throws an armnn::ParseException whose message is the streamed `msg`
// followed by the call-site location. Uses a temporary std::stringstream so
// `msg` may be any chain of operator<< expressions.
#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
          { \
            throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
               << ": " \
               << CHECK_LOCATION().AsString()).str()); \
          }
telsoa01c577f2c2018-08-31 09:22:23 +010048
49using namespace armnn;
50using armnn::CheckLocation;
51namespace armnnTfLiteParser
52{
Kevin May7d96b162021-02-03 17:38:41 +000053
// ITfLiteParser is a thin pimpl facade: it owns a TfLiteParserImpl and every
// public call below forwards to it unchanged. This keeps the ABI-stable
// interface free of implementation details (flatbuffers, schema types).
ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
    pTfLiteParserImpl(new TfLiteParserImpl(options)) {}

ITfLiteParser::~ITfLiteParser() = default;

// Raw-pointer factory; caller must release via ITfLiteParser::Destroy.
ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
{
    return new ITfLiteParser(options);
}

// Preferred factory: returns a smart pointer bound to Destroy as its deleter.
ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
{
    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
}

void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}

// Builds an INetwork from a .tflite file on disk.
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
}

// Builds an INetwork from an in-memory flatbuffer blob.
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
}

// Looks up the binding info (layer binding id + TensorInfo) for a named
// input tensor of the given subgraph.
BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
}

// Looks up the binding info for a named output tensor of the given subgraph.
BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                            const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
}

size_t ITfLiteParser::GetSubgraphCount() const
{
    return pTfLiteParserImpl->GetSubgraphCount();
}

std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
}

std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
}
110
telsoa01c577f2c2018-08-31 09:22:23 +0100111namespace
112{
jimfly01c25411c2018-11-14 17:47:22 +0000113
// Sentinel operator index used by checks that are not tied to a concrete
// operator (e.g. when wiring subgraph inputs/outputs).
const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

// Throws ParseException if the model is null or subgraphIndex is out of
// range. `location` identifies the calling function for the error message;
// use the CHECK_SUBGRAPH macro below to capture it automatically.
void CheckSubgraph(const TfLiteParserImpl::ModelPtr & model,
                   size_t subgraphIndex,
                   const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            fmt::format("{} was called with invalid (null) model. "
                        "Possible reason is that the model is not yet loaded and Unpack(ed). "
                        "subgraph:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid subgraph index. "
                        "subgraph:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
143
Kevin May7d96b162021-02-03 17:38:41 +0000144void CheckModel(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100145 size_t subgraphIndex,
146 size_t operatorIndex,
147 const CheckLocation & location)
148{
149 if (model.get() == nullptr)
150 {
151 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100152 fmt::format("{} was called with invalid (null) model. "
153 "Possible reason is that the model is not yet loaded and Unpack(ed). "
154 "subgraph:{} operator:{} at {}",
155 location.m_Function,
156 subgraphIndex,
157 operatorIndex,
158 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100159 }
160 else if (subgraphIndex >= model->subgraphs.size())
161 {
162 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100163 fmt::format("{} was called with an invalid subgraph index. "
164 "subgraph:{} operator:{} at {}",
165 location.m_Function,
166 subgraphIndex,
167 operatorIndex,
168 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100169 }
170 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
171 operatorIndex != VIRTUAL_OPERATOR_ID)
172 {
173 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100174 fmt::format("{} was called with an invalid operator index. "
175 "subgraph:{} operator:{} at {}",
176 location.m_Function,
177 subgraphIndex,
178 operatorIndex,
179 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100180 }
181}
182
183#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
184 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
185
// Throws ParseException if tensorIndex is out of range for the given
// subgraph. Model validity and subgraph range are assumed to have been
// verified already (via CHECK_MODEL), so those are only asserted here.
void CheckTensor(const TfLiteParserImpl::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation & location)
{
    // not checking model, because I assume CHECK_MODEL already run
    // and checked that. An assert would do.
    ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // also subgraph index should be checked by CHECK_MODEL so
    // I only add an assert here
    ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // the tensor index is the only one to check here
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid tensor index. "
                        "subgraph:{} tensor:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        tensorIndex,
                        location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
214
Kevin May7d96b162021-02-03 17:38:41 +0000215void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100216 const CheckLocation & location)
217{
218 if (rawPtr == nullptr)
219 {
220 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100221 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100222 }
223}
224
225#define CHECK_TENSOR_PTR(TENSOR_PTR) \
226 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
227
// Throws ParseException if the model is null, bufferIndex is out of range,
// or the buffer entry itself is null. Use CHECK_BUFFER to capture location.
void CheckBuffer(const TfLiteParserImpl::ModelPtr & model,
                 size_t bufferIndex,
                 const CheckLocation & location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            fmt::format("{} was called with invalid (null) model. "
                        "Possible reason is that the model is not yet loaded and Unpack(ed). "
                        "buffer:{} at {}",
                        location.m_Function,
                        bufferIndex,
                        location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid buffer index. "
                        "buffer index:{} at {}",
                        location.m_Function,
                        bufferIndex,
                        location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            fmt::format("The buffer #{} is null. {}",
                        bufferIndex,
                        location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
262
// Throws ParseException if the buffer is null or too small to hold the data
// described by tensorInfo. Use CHECK_BUFFER_SIZE to capture the location.
void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo & tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation & location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("BufferPtr is null for buffer:{}. {}",
                        bufferId,
                        location.AsString()));
    }
    // NOTE(review): data.size() is a byte count, but the first comparison is
    // against GetNumElements() (an element count). For element sizes >= 1 byte
    // the GetNumBytes() check subsumes it — presumably kept as a belt-and-braces
    // guard; confirm before removing.
    else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
            tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
289
290bool IsActivationSupported(tflite::ActivationFunctionType activationType)
291{
292 switch(activationType)
293 {
294 case tflite::ActivationFunctionType_NONE:
295 case tflite::ActivationFunctionType_RELU:
296 case tflite::ActivationFunctionType_RELU6:
297 case tflite::ActivationFunctionType_TANH:
298 {
299 return true;
300 }
301 default:
302 {
303 return false;
304 }
305 }
306}
307
// Throws ParseException when the operator carries a fused activation that
// IsActivationSupported() rejects. do/while(false) makes the macro behave as
// a single statement. Fix: corrected the "suppport" typo in the user-facing
// error message.
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                fmt::format("TfLite parser doesn't support fused activation: " \
                            "{}/{} in {} subgraph:{} operator:{} at {}", \
                            OPTION->fused_activation_function, \
                            tflite::EnumNameActivationFunctionType(\
                                OPTION->fused_activation_function), \
                            __func__, \
                            SUBGRAPH_INDEX, \
                            OPERATOR_INDEX, \
                            CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
324
325
326std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
327{
328 std::vector<unsigned int> result;
329 result.reserve(in.size());
330 for (auto & i : in)
331 {
332 result.push_back(CHECKED_NON_NEGATIVE(i));
333 }
334 return result;
335}
336
337void CalcPadding(uint32_t inputSize,
338 uint32_t filterSize,
339 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100340 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100341 uint32_t& paddingFront,
342 uint32_t& paddingBack,
343 tflite::Padding padding)
344{
345 paddingFront = 0;
346 paddingBack = 0;
347 if (padding == tflite::Padding_SAME)
348 {
349 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100350 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
351 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100352 if (temp > inputSize)
353 {
354 paddingFront = (temp - inputSize) / 2;
355 paddingBack = (temp - inputSize) - paddingFront;
356 }
357 }
358}
359
Kevin May7d96b162021-02-03 17:38:41 +0000360armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100361 const std::vector<unsigned int>& shapes,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100362 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100363{
364 armnn::DataType type;
365 CHECK_TENSOR_PTR(tensorPtr);
366
367 switch (tensorPtr->type)
368 {
369 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000370 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100371 break;
372 case tflite::TensorType_FLOAT32:
373 type = armnn::DataType::Float32;
374 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000375 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000376 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000377 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000378 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000379 type = armnn::DataType::QAsymmS8;
380 }
381 else
382 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000383 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000384 type = armnn::DataType::QSymmS8;
385 }
Finn Williamsed66d142019-12-06 09:55:55 +0000386 break;
387 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000388 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000389 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100390 case tflite::TensorType_INT32:
391 type = armnn::DataType::Signed32;
392 break;
Inki Daed4619e22020-09-10 15:33:54 +0900393 case tflite::TensorType_INT64:
394 type = armnn::DataType::Signed64;
395 break;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100396 case tflite::TensorType_BOOL:
397 type = armnn::DataType::Boolean;
398 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100399 default:
400 {
401 CheckLocation location = CHECK_LOCATION();
402 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100403 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
404 tensorPtr->type,
405 tflite::EnumNameTensorType(tensorPtr->type),
406 tensorPtr->name,
407 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100408 }
409 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100410 std::vector<unsigned int> safeShape = shapes;
Sadik Armagand109a4d2020-07-28 10:42:13 +0100411 bool isDynamic = false;
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100412 if (safeShape.size() == 0)
413 {
414 safeShape.push_back(1);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100415 if (outputTensor)
416 {
417 isDynamic = true;
418 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100419 }
420
Keith Davisd305e1a2020-01-22 11:57:54 +0000421 float quantizationScale = 0.0f;
422 int32_t quantizationOffset = 0;
423
424 if (tensorPtr->quantization.get())
425 {
426 if (tensorPtr->quantization->scale.size() <= 1)
427 {
428 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
429 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
430
431 if (tensorPtr->quantization->scale.size() == 1)
432 {
433 quantizationScale = tensorPtr->quantization->scale[0];
434 }
435 if (tensorPtr->quantization->zero_point.size() == 1)
436 {
437 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000438 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100439 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000440 }
441
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100442 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100443 safeShape.data());
444 if (isDynamic)
445 {
446 tensorShape = TensorShape(1, false);
447 }
448 armnn::TensorInfo result(tensorShape,
449 type,
450 quantizationScale,
451 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000452 return result;
453 }
454 else
455 {
456 std::vector<float> quantizationScales;
457 std::vector<int32_t> quantizationOffsets;
458
459 // Scale
460 std::copy(tensorPtr->quantization->scale.begin(),
461 tensorPtr->quantization->scale.end(),
462 std::back_inserter(quantizationScales));
463
Keith Davis0c2eeac2020-02-11 16:51:50 +0000464 // QSymmS8 Per-axis
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100465 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100466 safeShape.data());
467 if (isDynamic)
468 {
469 tensorShape = TensorShape(1, false);
470 }
471 armnn::TensorInfo result(tensorShape,
472 type,
473 quantizationScales,
Jan Eilers7612bd62021-04-06 17:29:03 +0100474 armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
Keith Davisd305e1a2020-01-22 11:57:54 +0000475 return result;
476 }
477 }
478 else
479 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100480 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100481 safeShape.data());
482 if (isDynamic)
483 {
484 tensorShape = TensorShape(1, false);
485 }
486 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000487 type,
488 quantizationScale,
489 quantizationOffset);
490 return result;
491 }
telsoa01c577f2c2018-08-31 09:22:23 +0100492}
493
Jan Eilers7612bd62021-04-06 17:29:03 +0100494armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr)
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000495{
496 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100497 return ToTensorInfo(tensorPtr, dimensions);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000498}
499
Kevin May7d96b162021-02-03 17:38:41 +0000500armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100501 const bool outputTensor)
502{
503 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100504 return ToTensorInfo(tensorPtr, dimensions, outputTensor);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100505}
506
// Copies (and optionally permutes) raw flatbuffer data into newly allocated
// storage and wraps it in a ConstTensor. Returns the tensor together with the
// owning buffer: ConstTensor does not own its memory, so the caller must keep
// the unique_ptr alive for as long as the tensor is in use. When a non-empty
// permutation is supplied, tensorInfo is updated in place to the permuted
// shape.
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
                      TfLiteParserImpl::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    IgnoreUnused(tensorPtr); // only referenced by the asserts below in release builds
    ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    ARMNN_ASSERT_MSG(bufferPtr != nullptr,
        fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        // Permute the shape first, then rearrange the data to match it.
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
534
telsoa01c577f2c2018-08-31 09:22:23 +0100535armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
536{
537 // generate the binding id by shifting the tensor id by 8 bit
538 // and add the subgraph id, which allows 256 subgraphs
539 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
540}
541
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000542bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
543{
544 const unsigned int actualSize = actual.GetNumDimensions();
545 if (actualSize != expected.size())
546 {
547 return false;
548 }
549
550 for (unsigned int i = 0u; i < actualSize; i++)
551 {
552 if (expected[i] < 0 ||
553 actual[i] != static_cast<unsigned int>(expected[i]))
554 {
555 return false;
556 }
557 }
558
559 return true;
560}
561
// Verifies that two quantized tensors share the same quantized data type and
// quantization space (scale/offset); throws InvalidArgumentException with a
// descriptive message otherwise. Non-quantized tensors are ignored. The name
// parameters are only used to build the error text.
void CheckMatchingQuantization(const TensorInfo& first,
                               const TensorInfo& second,
                               const std::string& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (!first.IsQuantized() ||
        !second.IsQuantized())
    {
        // Not a quantized type, ignore the validation
        return;
    }

    DataType firstDataType  = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
                                       secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(second.GetQuantizationScale()));
    }
}
596
telsoa01c577f2c2018-08-31 09:22:23 +0100597} // <anonymous>
598
// Constructs the parser and registers one handler per TF Lite builtin
// operator. The dispatch table is pre-filled with ParseUnsupportedOperator so
// any builtin without an explicit entry produces a clear "unsupported" error
// instead of a crash. Custom (non-builtin) operators are dispatched by name
// via m_CustomParserFunctions.
TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ABS]                     = &TfLiteParserImpl::ParseAbs;
    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParserImpl::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN]                 = &TfLiteParserImpl::ParseArgMin;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX]                 = &TfLiteParserImpl::ParseArgMax;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParserImpl::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParserImpl::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CAST]                    = &TfLiteParserImpl::ParseCast;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParserImpl::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParserImpl::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParserImpl::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE]          = &TfLiteParserImpl::ParseDepthToSpace;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParserImpl::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParserImpl::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParserImpl::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ELU]                     = &TfLiteParserImpl::ParseElu;
    m_ParserFunctions[tflite::BuiltinOperator_EXP]                     = &TfLiteParserImpl::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParserImpl::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER]                  = &TfLiteParserImpl::ParseGather;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH]              = &TfLiteParserImpl::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParserImpl::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT]             = &TfLiteParserImpl::ParseLogicalNot;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParserImpl::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParserImpl::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParserImpl::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParserImpl::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParserImpl::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParserImpl::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParserImpl::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParserImpl::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParserImpl::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParserImpl::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParserImpl::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParserImpl::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX]              = &TfLiteParserImpl::ParseReduceMax;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN]              = &TfLiteParserImpl::ParseReduceMin;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParserImpl::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParserImpl::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_RSQRT]                   = &TfLiteParserImpl::ParseRsqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParserImpl::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParserImpl::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParserImpl::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParserImpl::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V]                 = &TfLiteParserImpl::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParserImpl::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParserImpl::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParserImpl::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_SUM]                     = &TfLiteParserImpl::ParseSum;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParserImpl::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParserImpl::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParserImpl::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParserImpl::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"]      = &TfLiteParserImpl::ParseDetectionPostProcess;
}
662
Kevin May7d96b162021-02-03 17:38:41 +0000663void TfLiteParserImpl::ResetParser()
telsoa01c577f2c2018-08-31 09:22:23 +0100664{
665 m_Network = armnn::INetworkPtr(nullptr, nullptr);
666 m_Model = nullptr;
667 m_SubgraphConnections.clear();
668}
669
// Loads a .tflite flatbuffer from disk and converts it to an INetwork.
// Resets all parser state first, so the instance is reusable.
INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}
676
// Converts an in-memory .tflite flatbuffer blob to an INetwork.
// Resets all parser state first, so the instance is reusable.
INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
683
// Translates the loaded TfLite model (m_Model) into an armnn::INetwork.
// Two passes: (1) walk every operator of the single supported subgraph and
// let the per-op parser functions create layers, recording producer/consumer
// slots in m_SubgraphConnections; (2) wire all recorded slots together.
// Throws ParseException on any unsupported construct or parse failure.
// Returns ownership of the built network (m_Network is moved out).
INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
{

    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
    // Optionally ask the network to infer and validate tensor shapes as
    // layers are added (driven by the parser's optional configuration).
    if (m_Options && m_Options.value().m_InferAndValidate)
    {
        BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                  {
                                                      { "InferAndValidate", true }
                                                  });

        networkOptions.push_back(shapeInferenceMethodOption);
    }

    m_Network = INetwork::Create(networkOptions);
    ARMNN_ASSERT(m_Model.get() != nullptr);

    // Multiple subgraphs are not supported by this parser version.
    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
                fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
                            m_Model->subgraphs.size(),
                            CHECK_LOCATION().AsString()));
    }

    // These indices are kept outside the try block so the catch handler can
    // report exactly which operator/subgraph failed.
    size_t subgraphIndex = 0;
    size_t operatorIndex = 0;
    try
    {
        for (SubgraphPtr const& subgraph : m_Model->subgraphs)
        {
            // One TensorSlots entry per tensor in this subgraph.
            m_SubgraphConnections.emplace_back(subgraph->tensors.size());
            for (OperatorPtr const& op : subgraph->operators)
            {
                auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                // Reject opcodes outside the range the schema (and therefore
                // the m_ParserFunctions table) knows about.
                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
                                                     "subgraph:{} operator idx:{}. {}",
                                                     builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
                                                     operatorIndex, CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto& parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
                ++operatorIndex;
            }

            // Create the input/output/constant layers for this subgraph after
            // all operator layers exist.
            SetupInputLayers(subgraphIndex);
            SetupOutputLayers(subgraphIndex);
            SetupConstantLayers(subgraphIndex);

            ++subgraphIndex;
            operatorIndex = 0;
        }
    }
    catch (const ParseException& e)
    {
        // Re-throw with the failing operator/subgraph identified, after
        // logging the same message.
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // A tensor with no registered producer (e.g. an unused tensor)
            // is simply skipped.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    // m_Network is a member, so an explicit move is required to hand
    // ownership to the caller.
    return std::move(m_Network);
}
775
Kevin May7d96b162021-02-03 17:38:41 +0000776void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
777 size_t tensorIndex,
778 armnn::IOutputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +0100779{
780 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100781 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
782 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100783
784 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
785
786 // assuming there is only one producer for that tensor
787 if (tensorSlots.outputSlot != nullptr)
788 {
James Ward58dec6b2020-09-11 17:32:44 +0100789 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
790 "subgraph:{} tensor:{} {}",
791 subgraphIndex,
792 tensorIndex,
793 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100794 }
795
796 tensorSlots.outputSlot = slot;
797}
798
Kevin May7d96b162021-02-03 17:38:41 +0000799void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
800 size_t tensorIndex,
801 armnn::IInputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +0100802{
803 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100804 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
805 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100806
Finn Williamsd4fa5452021-03-01 12:31:41 +0000807 TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +0100808 tensorSlots.inputSlots.push_back(slot);
809}
810
Kevin May7d96b162021-02-03 17:38:41 +0000811void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100812{
813 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
814
815 // NOTE: By default we presume the custom operator is not supported
Kevin May7d96b162021-02-03 17:38:41 +0000816 auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100817
818 // Identify custom code defined for custom operator
819 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
820 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
821
822 // Find parser function that correspondes to custom code (if any)
823 auto iterator = m_CustomParserFunctions.find(customCode);
824 if (iterator != m_CustomParserFunctions.end())
825 {
826 customParserFunction = iterator->second;
827 }
828
829 // Run parser function
830 (this->*customParserFunction)(subgraphIndex, operatorIndex);
831}
832
// Fallback handler for any operator the parser has no implementation for.
// Behaviour is driven by the optional parser options: by default it throws
// ParseException; when m_StandInLayerForUnsupported is set it instead adds
// a non-executable StandInLayer with the same input/output arity so that
// graph construction can continue.
void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
            fmt::format("Operator not supported. "
                        "subgraph:{} operator:{} "
                        "opcode_index:{} opcode:{} / {} {}",
                        subgraphIndex,
                        operatorIndex,
                        opcodeIndex,
                        opcode,
                        tflite::EnumNameBuiltinOperator(opcode),
                        CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());

    // The StandIn descriptor only carries the operator's arity.
    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Propagate the model's output tensor infos so downstream layers can
    // still be constructed (the 'true' flag permits inferred/dynamic shapes).
    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
    }

    // Register every input and output slot; connections are made later,
    // once all layers exist.
    auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}
881
mathad01b392e982021-04-07 12:07:30 +0100882void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
883{
884 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
885
886 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
887 CHECK_VALID_SIZE(inputs.size(), 1);
888 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
889 CHECK_VALID_SIZE(outputs.size(), 1);
890
891 auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
892
893 IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
894 ARMNN_ASSERT(layer != nullptr);
895
896 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
897 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
898
899 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
900 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
901
902 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
903 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
904}
905
// Parses a TfLite CONV_2D operator into an ArmNN Convolution2d layer,
// including optional bias and an optional fused activation (which becomes a
// separate Activation layer appended to the convolution's output).
// Inputs: [0] data (NHWC), [1] const filter weights, optional [2] const bias.
void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Convert TfLite's padding scheme (SAME/VALID) into explicit per-edge
    // padding values on the descriptor, taking dilation into account.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // Weights are baked into the layer as a constant tensor (no permute needed
    // for Conv2D since OHWI matches what this path expects).
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        // Third input present => convolution has a bias.
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData,
                                                 Optional<ConstTensor>(biasTensorAndData),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Splice in the fused activation (if any); from here on 'layer' refers to
    // the activation layer, so the output slot registered below belongs to it.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
984
// Parses a TfLite DEPTHWISE_CONV_2D operator into an ArmNN
// DepthwiseConvolution2d layer. TfLite stores depthwise weights as
// [1, H, W, I*M]; this function reinterprets them as [H, W, I, M] and then
// permutes to the [M, I, H, W] layout ArmNN expects. Optional bias and
// fused activation are handled as for Conv2D.
void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // depth_multiplier is validated but not stored: M is recovered below from
    // the filter/input channel dimensions instead.
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    // M (the depth multiplier) is derived as (I*M) / I from the shapes.
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Convert TfLite SAME/VALID padding to explicit per-edge padding.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // Returns a pair; .first is the permuted ConstTensor (the pair presumably
    // also owns the permuted data's storage — see CreateConstTensorPermuted).
    auto filterTensorAndData = CreateConstTensorPermuted(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        // Third input present => convolution has a bias.
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Splice in the fused activation (if any); output slots below belong to it.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1069
Kevin May7d96b162021-02-03 17:38:41 +00001070void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsed66d142019-12-06 09:55:55 +00001071{
1072 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1073
1074 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1075 CHECK_VALID_SIZE(inputs.size(), 1);
1076
1077 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1078 CHECK_VALID_SIZE(outputs.size(), 1);
1079
James Ward58dec6b2020-09-11 17:32:44 +01001080 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +00001081
1082 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001083 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +00001084
Sadik Armagand109a4d2020-07-28 10:42:13 +01001085 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Finn Williamsed66d142019-12-06 09:55:55 +00001086 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1087
1088 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1089 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1090
1091 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1092 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1093}
1094
// Parses a TfLite TRANSPOSE operator into an ArmNN Transpose layer.
// Inputs: [0] the tensor to transpose, optional [1] a constant tensor holding
// the permutation. With only one input the descriptor keeps its default
// permutation.
void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1, 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
    TransposeDescriptor desc;

    if (inputs.size() == 2)
    {
        // Read the permutation values straight out of the flatbuffer buffer.
        // NOTE(review): the raw memcpy into unsigned int assumes the buffer's
        // element type/width matches (presumably int32) — verify against the
        // model schema.
        armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
        BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
        auto numPermVecElements = permuteTensorInfo.GetNumElements();
        std::vector<unsigned int> permuteShape(numPermVecElements);
        ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
        PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());

        desc = TransposeDescriptor(permutationVector);
    }

    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    // Input and output quantization parameters must agree for Transpose.
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register slots; only input 0 is a real (non-const) input.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1134
// Parses a TfLite TRANSPOSE_CONV operator into an ArmNN
// TransposeConvolution2d layer.
// Inputs: [0] const output-shape tensor, [1] const filter weights,
// [2] the actual data input, optional [3] const bias (4 inputs => bias).
void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    // A fourth input is the bias tensor.
    if (inputs.size() == 4)
    {
        desc.m_BiasEnabled = true;
    }
    else
    {
        CHECK_VALID_SIZE(inputs.size(), 3);
    }

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Input 0, when present, is a constant tensor giving the explicit output
    // shape. Its values are copied into the descriptor.
    if (inputs[0])
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
        std::vector<int> output_shape(tensorInfo.GetNumElements());
        if (tensorInfo.GetDataType() == DataType::Signed32)
        {
            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
        }
        // NOTE(review): for QAsymmU8 the raw bytes are read one-per-element as
        // shape values — this only works while each dimension fits in a byte;
        // confirm against the models that exercise this path.
        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
        {
            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
            {
                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
            }
        }
        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : output_shape)
        {
            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        desc.m_OutputShapeEnabled = true;
    }
    // Note: the data input is index 2, not 0 (0 is the output shape tensor).
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Convert TfLite SAME/VALID padding to explicit per-edge padding
    // (dilation is fixed at 1 for transpose convolution here).
    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);

    if (desc.m_BiasEnabled)
    {
        auto biasTensorInfo = ToTensorInfo(inputs[3]);
        auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo);
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData,
                                                          biasConstTensor,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1243
Kevin May7d96b162021-02-03 17:38:41 +00001244void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001245{
1246 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
1247}
1248
// Parses a TfLite BATCH_TO_SPACE_ND operator into an ArmNN BatchToSpaceNd
// layer. Inputs: [0] data, [1] const block-shape tensor, [2] const crops
// tensor; both constants are read directly from the flatbuffer buffers.
void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    // NOTE(review): the raw memcpy into unsigned int assumes the buffers hold
    // 32-bit values (presumably int32 per the TfLite schema) — verify.
    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());

    // The crops buffer holds (begin, end) pairs, one pair per spatial dim.
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }

    armnn::BatchToSpaceNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_Crops = crops;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);

    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    // Input and output quantization parameters must agree for this op.
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register slots; only input 0 is a real (non-const) input.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1299
Kevin May7d96b162021-02-03 17:38:41 +00001300void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001301{
1302 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1303
1304 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1305 CHECK_VALID_SIZE(inputs.size(), 1);
1306
1307 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1308 CHECK_VALID_SIZE(outputs.size(), 1);
1309
1310 L2NormalizationDescriptor desc;
1311 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001312 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001313 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1314
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001315 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001316
Sadik Armagand109a4d2020-07-28 10:42:13 +01001317 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson28c94572019-07-18 10:47:03 +01001318 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1319
1320 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1321 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1322
1323 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1324 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1325}
1326
void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    // MAX_POOL_2D: delegate to the shared 2D pooling handler with the Max algorithm.
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1331
Kevin May7d96b162021-02-03 17:38:41 +00001332void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001333{
1334 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1335
1336 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1337 CHECK_VALID_SIZE(inputs.size(), 2);
1338
1339 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1340 CHECK_VALID_SIZE(outputs.size(), 1);
1341
James Ward58dec6b2020-09-11 17:32:44 +01001342 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001343
1344 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1345 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1346 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001347
Sadik Armagand109a4d2020-07-28 10:42:13 +01001348 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001349 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1350
1351 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1352 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001353 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1354
1355 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001356 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001357
1358 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1359 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1360}
1361
Kevin May7d96b162021-02-03 17:38:41 +00001362void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001363{
1364 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1365
1366 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1367 CHECK_VALID_SIZE(inputs.size(), 2);
1368
1369 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1370 CHECK_VALID_SIZE(outputs.size(), 1);
1371
James Ward58dec6b2020-09-11 17:32:44 +01001372 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001373
1374 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1375 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1376 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001377
Sadik Armagand109a4d2020-07-28 10:42:13 +01001378 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001379 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1380
1381 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1382 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001383 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1384
1385 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001386 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001387
1388 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1389 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1390}
1391
void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
                                 size_t operatorIndex,
                                 PoolingAlgorithm algorithm)
{
    // Common handler for the 2D pooling operators (AVERAGE_POOL_2D and MAX_POOL_2D).
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsPool2DOptions();

    // Reject fused activation functions the parser cannot represent.
    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    std::string layerName;

    // The layer name encodes the concrete TfLite op plus subgraph/operator indices.
    switch (algorithm)
    {
        case PoolingAlgorithm::Average:
            layerName =
                fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
            break;
        case PoolingAlgorithm::Max:
            layerName =
                fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
            break;
        default:
            ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
    }

    Pooling2dDescriptor desc;

    desc.m_PoolType = algorithm;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
    desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
    desc.m_PaddingMethod = PaddingMethod::Exclude;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // Derive explicit pad values from the TfLite padding scheme (SAME/VALID);
    // dilation is fixed at 1 for pooling.
    CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
                desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
                desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Any fused activation becomes a follow-on layer; its output slot (not the
    // pooling layer's) is then registered against the operator's output tensor.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1463
Kevin May7d96b162021-02-03 17:38:41 +00001464void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
josh minorba424d22019-11-13 10:55:17 -06001465{
1466 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1467
1468 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1469 CHECK_VALID_SIZE(inputs.size(), 3);
1470 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1471 CHECK_VALID_SIZE(outputs.size(), 1);
1472
1473 SliceDescriptor desc;
1474
1475 // set begin tensor info for slice descriptor
1476 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1477 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1478
1479 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1480 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1481
1482 // set size tensor info for slice descriptor
1483 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1484 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1485
1486 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1487 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1488 desc = SliceDescriptor(begin, size);
1489
James Ward58dec6b2020-09-11 17:32:44 +01001490 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06001491
James Conroy05102392020-06-24 15:39:55 +01001492 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001493 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001494 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1495
1496 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
josh minorba424d22019-11-13 10:55:17 -06001497 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1498
1499 // register the input connection slots for the layer, connections are made after all layers have been created
1500 // only the tensors for the inputs are relevant, exclude the const tensors
1501 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1502 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1503
1504 // register the output connection slots for the layer, connections are made after all layers have been created
1505 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1506 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1507}
1508
Kevin May7d96b162021-02-03 17:38:41 +00001509void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001510{
1511 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1512 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1513 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1514
1515 SoftmaxDescriptor desc;
1516 desc.m_Beta = options->beta;
1517
1518 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1519 CHECK_VALID_SIZE(inputs.size(), 1);
1520 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1521 CHECK_VALID_SIZE(outputs.size(), 1);
1522
James Ward58dec6b2020-09-11 17:32:44 +01001523 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001524 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1525
Sadik Armagand109a4d2020-07-28 10:42:13 +01001526 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
telsoa01c577f2c2018-08-31 09:22:23 +01001527 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1528
1529 // register the input connection slots for the layer, connections are made after all layers have been created
1530 // only the tensors for the inputs are relevant, exclude the const tensors
1531 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1532 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1533
1534 // register the output connection slots for the layer, connections are made after all layers have been created
1535 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1536 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1537}
1538
Kevin May7d96b162021-02-03 17:38:41 +00001539void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001540{
1541 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1542
1543 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1544 CHECK_VALID_SIZE(inputs.size(), 3);
1545
1546 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1547 CHECK_VALID_SIZE(outputs.size(), 1);
1548
1549 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1550 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1551
1552 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1553 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1554
1555 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1556 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1557
1558 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1559 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1560
1561 size_t step = 2;
1562 std::vector<std::pair<unsigned int, unsigned int>> padList;
1563 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1564 {
1565 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1566 }
1567
1568 armnn::SpaceToBatchNdDescriptor desc;
1569 desc.m_BlockShape = blockShape;
1570 desc.m_PadList = padList;
1571 desc.m_DataLayout = armnn::DataLayout::NHWC;
1572
James Ward58dec6b2020-09-11 17:32:44 +01001573 auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001574
James Conroy05102392020-06-24 15:39:55 +01001575 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001576 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001577 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1578
1579 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1580 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001581 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1582
1583 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1584 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1585
1586 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1587 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1588}
1589
Kevin May7d96b162021-02-03 17:38:41 +00001590armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1591 const armnn::TensorInfo & inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01001592{
1593 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1594 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1595 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1596
1597 if (inputTensorInfo.GetNumDimensions() > 4)
1598 {
1599 std::stringstream ss;
1600 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1601 << " shape:" << inputTensorInfo.GetShape() << " "
1602 << CHECK_LOCATION().AsString();
1603 throw ParseException(ss.str());
1604 }
1605
1606 if (squeezeDims.empty())
1607 {
1608 squeezeDims.assign(dimensionSequence,
1609 dimensionSequence+inputTensorInfo.GetNumDimensions());
1610 }
1611
1612 std::vector<uint32_t> outputDims;
1613 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1614 {
1615 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1616 auto currentDimension = inputTensorInfo.GetShape()[i];
1617 if (skipSqueeze || currentDimension != 1)
1618 {
1619 outputDims.push_back(currentDimension);
1620 }
1621 }
1622
1623 if (outputDims.size() > 4)
1624 {
1625 std::stringstream ss;
1626 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1627 << " shape:" << inputTensorInfo.GetShape() << " "
1628 << CHECK_LOCATION().AsString();
1629 throw ParseException(ss.str());
1630 }
1631
1632 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1633 outputDims.data());
1634
1635 // we need to preserve the tensor type and the quantization data as well
1636 TensorInfo outTensorInfo = inputTensorInfo;
1637 outTensorInfo.SetShape(outShape);
1638
1639 return outTensorInfo;
1640}
1641
Kevin May7d96b162021-02-03 17:38:41 +00001642void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001643{
1644 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1645
1646 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1647 CHECK_VALID_SIZE(inputs.size(), 1);
1648
1649 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1650 CHECK_VALID_SIZE(outputs.size(), 1);
1651
1652 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1653 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01001654 auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001655
1656 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1657 armnn::TensorInfo outputTensorInfo =
Kevin May7d96b162021-02-03 17:38:41 +00001658 TfLiteParserImpl::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
telsoa01c577f2c2018-08-31 09:22:23 +01001659 inputTensorInfo);
James Conroy05102392020-06-24 15:39:55 +01001660 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
telsoa01c577f2c2018-08-31 09:22:23 +01001661
1662 ReshapeDescriptor reshapeDesc;
1663 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1664
telsoa01c577f2c2018-08-31 09:22:23 +01001665 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001666 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001667 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1668
1669 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1670 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1671
1672 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1673 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1674}
1675
Kevin May7d96b162021-02-03 17:38:41 +00001676void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001677{
1678 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1679
1680 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1681 CHECK_VALID_SIZE(inputs.size(), 4);
1682
1683 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1684 CHECK_VALID_SIZE(outputs.size(), 1);
1685
1686 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1687 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1688
1689 StridedSliceDescriptor desc;
1690 desc.m_BeginMask = options->begin_mask;
1691 desc.m_EllipsisMask = options->ellipsis_mask;
1692 desc.m_EndMask = options->end_mask;
1693 desc.m_NewAxisMask = options->new_axis_mask;
1694 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1695 desc.m_DataLayout = armnn::DataLayout::NHWC;
1696
1697 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1698 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1699
1700 std::vector<int> begin(beginTensorInfo.GetNumElements());
1701 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1702
1703 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1704 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1705
1706 std::vector<int> end(endTensorInfo.GetNumElements());
1707 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1708
1709 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1710 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1711
1712 std::vector<int> stride(strideTensorInfo.GetNumElements());
1713 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1714
1715 desc.m_Begin = begin;
1716 desc.m_End = end;
1717 desc.m_Stride = stride;
1718
James Ward58dec6b2020-09-11 17:32:44 +01001719 auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001720 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001721 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001722
Sadik Armagand109a4d2020-07-28 10:42:13 +01001723 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001724 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1725
1726 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1727 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1728
1729 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1730 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1731}
1732
Kevin May7d96b162021-02-03 17:38:41 +00001733void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001734{
1735 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1736
1737 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1738 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1739
1740 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1741 CHECK_VALID_SIZE(inputs.size(), 2);
1742
1743 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1744 CHECK_VALID_SIZE(outputs.size(), 1);
1745
1746 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1747 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1748
James Ward58dec6b2020-09-11 17:32:44 +01001749 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001750 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001751 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001752
Sadik Armagand109a4d2020-07-28 10:42:13 +01001753 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001754 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1755
1756 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001757 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001758
1759 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1760
1761 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1762 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1763}
1764
Kevin May7d96b162021-02-03 17:38:41 +00001765void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301766{
1767 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1768
1769 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1770 const auto * options = operatorPtr->builtin_options.AsDivOptions();
1771
1772 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1773 CHECK_VALID_SIZE(inputs.size(), 2);
1774
1775 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1776 CHECK_VALID_SIZE(outputs.size(), 1);
1777
1778 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1779 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1780
James Ward58dec6b2020-09-11 17:32:44 +01001781 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301782 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001783 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301784
Sadik Armagand109a4d2020-07-28 10:42:13 +01001785 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301786 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1787
1788 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001789 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301790 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1791
1792 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1793 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1794}
1795
Kevin May7d96b162021-02-03 17:38:41 +00001796void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001797{
1798 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1799
1800 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1801 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1802
1803 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1804 CHECK_VALID_SIZE(inputs.size(), 2);
1805
1806 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1807 CHECK_VALID_SIZE(outputs.size(), 1);
1808
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001809 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1810 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1811
James Ward58dec6b2020-09-11 17:32:44 +01001812 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001813 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001814 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001815
Sadik Armagand109a4d2020-07-28 10:42:13 +01001816 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001817 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1818
1819 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001820 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001821 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1822
1823 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1824 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1825}
1826
Kevin May7d96b162021-02-03 17:38:41 +00001827void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001828{
1829 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1830
1831 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1832 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1833
1834 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1835 CHECK_VALID_SIZE(inputs.size(), 2);
1836
1837 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1838 CHECK_VALID_SIZE(outputs.size(), 1);
1839
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001840 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1841 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1842
James Ward58dec6b2020-09-11 17:32:44 +01001843 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001844 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001845 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001846
Sadik Armagand109a4d2020-07-28 10:42:13 +01001847 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001848 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1849
1850 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001851 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001852 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1853
1854 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1855 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1856}
1857
Kevin May7d96b162021-02-03 17:38:41 +00001858void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001859{
1860 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1861
1862 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1863
1864 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1865 CHECK_VALID_SIZE(outputs.size(), 1);
1866
1867 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1868 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1869
1870 armnn::MeanDescriptor desc;
1871 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1872 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1873 desc.m_Axis = axis;
1874
1875 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001876 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001877
1878 desc.m_KeepDims =
1879 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1880 true : false;
1881
James Ward58dec6b2020-09-11 17:32:44 +01001882 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001883 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001884 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001885
1886 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1887
1888 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1889 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1890
1891 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1892 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1893}
1894
Kevin May7d96b162021-02-03 17:38:41 +00001895void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001896{
1897 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1898
Kevin May7d96b162021-02-03 17:38:41 +00001899 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001900
Kevin May7d96b162021-02-03 17:38:41 +00001901 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001902 CHECK_VALID_SIZE(outputs.size(), 1);
1903
Narumol Prangnawarat8719d222020-11-27 16:57:56 +00001904 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1905
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001906 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1907 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1908
1909 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1910 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1911
1912 size_t step = 2;
1913 armnn::PadDescriptor desc;
Narumol Prangnawarat8719d222020-11-27 16:57:56 +00001914 if (inputTensorInfo.IsQuantized())
1915 {
1916 desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
1917 }
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001918 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1919 {
1920 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1921 }
1922
James Ward58dec6b2020-09-11 17:32:44 +01001923 auto layerName = fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001924 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001925
1926 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1927 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001928 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1929
1930 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1931 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1932
1933 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1934 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1935}
1936
Kevin May7d96b162021-02-03 17:38:41 +00001937void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00001938{
1939 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1940
1941 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1942 CHECK_VALID_SIZE(inputs.size(), 1);
1943
1944 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1945 CHECK_VALID_SIZE(outputs.size(), 1);
1946
James Ward58dec6b2020-09-11 17:32:44 +01001947 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001948
1949 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001950 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001951
Sadik Armagand109a4d2020-07-28 10:42:13 +01001952 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001953 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1954
1955 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1956 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1957
1958 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1959 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1960}
Finn Williamsc42c3842019-01-22 14:18:11 +00001961
// Handles the TfLite RELU operator by delegating to the shared activation parser.
void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
}
1966
// Handles the TfLite RELU6 operator; RELU6 maps to a bounded ReLU whose
// bounds are filled in by ParseActivation.
void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
}
Sadik Armagan58f39192018-09-17 14:14:39 +01001971
// Handles the TfLite LEAKY_RELU operator; the alpha option is read from the
// operator inside ParseActivation.
void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
}
1976
// Handles the TfLite LOGISTIC operator as a Sigmoid activation.
void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
}
1981
// Handles the TfLite TANH operator by delegating to the shared activation parser.
void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
}
1986
// Handles the TfLite ELU operator by delegating to the shared activation parser.
void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
}
1991
// Handles the TfLite HARD_SWISH operator by delegating to the shared activation parser.
void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
}
Finn Williamsc42c3842019-01-22 14:18:11 +00001996
// Common back-end for all activation operators: builds an ActivationDescriptor
// for the requested function, adds the layer and wires up its single input and
// single output. Throws ParseException for activation types it does not know.
void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    // operatorPtr is only dereferenced in the LeakyReLu branch below; silence
    // the unused-variable warning for the other activation types.
    IgnoreUnused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Activation:");
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    // Each case completes the layer name and fills in the function-specific
    // descriptor parameters (m_A / m_B) where the function needs them.
    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
            // RELU6: clamp between 0 (m_B) and 6 (m_A).
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::TanH:
        {
            layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        case ActivationFunction::LeakyReLu:
        {
            layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
            // LeakyReLu carries its negative-slope alpha in the builtin options.
            const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
            activationDesc.m_A = options->alpha;
            break;
        }
        case ActivationFunction::Elu:
        {
            layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
            activationDesc.m_A = 1.0f;
            break;
        }
        case ActivationFunction::HardSwish:
        {
            layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
                            static_cast<int>(activationType), CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
// Computes the concrete output TensorInfo for a reshape: copies the input's
// metadata and replaces its shape with targetDimsIn, resolving at most one
// wildcard dimension (-1) from the input's total element count.
// Throws ParseException if more than one -1 appears in the target shape.
armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
                                                         const std::vector<int32_t> & targetDimsIn)
{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        // A second -1 after the first makes the shape ambiguous.
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
        }

        // The accumulate starts at -1 so it cancels with the single -1
        // wildcard, leaving the (positive) product of the explicit dims.
        auto targetNumElements =
            armnn::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        // The wildcard dim absorbs whatever is left of the element count.
        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    // Keep data type / quantization info from the input; only the shape changes.
    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
2108
// Converts a TfLite RESHAPE operator into an armnn Reshape layer. The target
// shape may come from the operator's builtin options or, failing that, from a
// constant second input tensor; at least one source must be present.
void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
    auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");

    // Extracting new shape for the output
    // There are two ways it can be passed
    //  * First is to define the target shape in the operator built-in options
    //  * Second is to pass it as a second input tensor
    std::vector<int32_t> targetShape;
    bool targetShapeFound = false;
    // Check if built-in options were given
    if (options != nullptr)
    {
        // make sure the parameter is given
        if (options->new_shape.empty() == false)
        {
            targetShape = options->new_shape;
            targetShapeFound = true;
        }
    }

    // If there is no built-in option given or if the built-in new_shape parameter was empty
    if (!targetShapeFound)
    {
        // Check for a second input tensor
        if (inputs.size() > 1 && inputs[1] != nullptr)
        {
            // Only a constant (non-variable), 1-D, int32 shape tensor is accepted.
            if (inputs[1]->is_variable)
            {
                ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
            }

            if (inputs[1]->shape.size() != 1)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
            }

            if (inputs[1]->type != tflite::TensorType_INT32)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
            }

            // Extract target shape from input
            auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
            auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
            if (!values)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Reshape operator target shape input buffer data is null");
            }
            for (int i=0; i < inputs[1]->shape[0]; ++i)
            {
                targetShape.push_back(values[i]);
            }
        }
        else
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }
    }

    // Resolve any -1 wildcard and build the concrete output info.
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2213
// Handles the TfLite RESIZE_BILINEAR operator via the shared resize parser.
void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
}
2218
// Handles the TfLite RESIZE_NEAREST_NEIGHBOR operator via the shared resize parser.
void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
}
2223
// Common back-end for the resize operators: input 1 is a constant [height,
// width] int32 tensor giving the target size; the data layout is fixed NHWC.
// Bilinear additionally reads align_corners from its builtin options.
void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);

    // Data for the parsed tensor args (size) must be stored locally.
    std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());

    BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());

    ResizeDescriptor desc;
    desc.m_Method = resizeMethod;
    // Size tensor layout is [height, width].
    desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
    desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto layerName = fmt::format("Resize:");

    switch (resizeMethod)
    {
        case ResizeMethod::Bilinear:
        {
            layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);

            const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
            const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();

            desc.m_AlignCorners = options->align_corners;
            break;
        }
        case ResizeMethod::NearestNeighbor:
        {
            layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
                            static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
        }
    }

    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2289
// Converts a TfLite CONCATENATION operator into an armnn Concat layer,
// building an OriginsDescriptor that places each input view along the
// concatenation axis, then appending any fused activation.
void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
    uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();

    // Normalise a possibly-negative axis into [0, inputRank).
    const unsigned int concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

    // Running offset along the concat axis; advanced by each view's extent.
    unsigned int mergeDimOrigin = 0;

    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);

        // This set up concatDescriptor view origin
        armnnUtils::ProcessConcatInputTensorInfo(
            inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);

    IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});

    // add fused activation layer
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2339
Kevin May7d96b162021-02-03 17:38:41 +00002340void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002341{
2342 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2343
2344 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2345 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2346
2347 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2348
2349 FullyConnectedDescriptor desc;
2350 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002351 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002352
2353 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2354 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2355 CHECK_VALID_SIZE(outputs.size(), 1);
2356
2357 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2358
2359 // Fully Connected Layer accepts two dimensional weights input
2360 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2361 if (weightsDimension != 2)
2362 {
2363 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002364 fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
2365 "Node {}",
2366 weightsDimension,
2367 CHECK_LOCATION().AsString()));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002368 }
2369
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002370 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01002371 auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002372
Finn Williamsd4fa5452021-03-01 12:31:41 +00002373 Optional<ConstTensor> filterOptionalConstTensor;
2374
2375 desc.m_ConstantWeights = IsConstTensor(inputs[1]);
2376
2377 // Either both weights and biases need to be inputs or both weights and biases need to be constant
2378 if (inputs.size() == 3 && desc.m_ConstantWeights != IsConstTensor(inputs[2]))
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002379 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00002380 throw ParseException(
2381 fmt::format("Weights and bias are not compatible."
2382 "Node {}",
2383 CHECK_LOCATION().AsString()));
2384 }
2385
2386 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2387 std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
2388 if (desc.m_ConstantWeights)
2389 {
2390 filterOptionalConstTensor = Optional<ConstTensor>(CreateConstTensorNonPermuted(inputs[1], filterTensorInfo));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002391 }
2392 else
2393 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00002394 // Non const weights will need to be registered as inputs
2395 tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002396 }
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002397
Finn Williamsd4fa5452021-03-01 12:31:41 +00002398 Optional<ConstTensor> biasOptionalConstTensor;
2399 if (inputs.size() == 3)
2400 {
2401 desc.m_BiasEnabled = true;
2402 if (desc.m_ConstantWeights)
2403 {
2404 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
2405 biasOptionalConstTensor = Optional<ConstTensor>(CreateConstTensorNonPermuted(inputs[2], biasTensorInfo));
2406 }
2407 else
2408 {
2409 // Non const biases will need to be registered as inputs
2410 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
2411 }
2412 }
2413
2414 layer = m_Network->AddFullyConnectedLayer(desc,
2415 filterOptionalConstTensor,
2416 biasOptionalConstTensor,
2417 layerName.c_str());
2418
2419 ARMNN_ASSERT(layer != nullptr);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002420 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2421
Finn Williamsd4fa5452021-03-01 12:31:41 +00002422 unsigned int startingSlotIndex = 0;
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002423 if (inputTensorInfo.GetNumDimensions() > 2)
2424 {
2425 // Add reshape to flatten to 2D [batch_size, input_size],
2426 // where "input_size" corresponds to the number of inputs to the layer,
2427 // matching the second dimension of weights,
2428 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2429 std::vector<unsigned int> reshapedDimensions(2);
2430 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2431 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2432
2433 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2434 {
2435 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002436 fmt::format("Failed to deduce input tensor shape from filter size {} {}",
2437 reshapedDimensions[1],
2438 CHECK_LOCATION().AsString()));
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002439 }
2440
2441 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2442 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2443
James Ward58dec6b2020-09-11 17:32:44 +01002444 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Finn Williamsd4fa5452021-03-01 12:31:41 +00002445 armnn::ReshapeDescriptor reshapeDescriptor;
2446 reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
2447 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, layerName.c_str());
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002448
2449 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2450 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2451
2452 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
Finn Williamsd4fa5452021-03-01 12:31:41 +00002453 // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
2454 tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
2455 startingSlotIndex = 1;
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002456 }
Finn Williamsd4fa5452021-03-01 12:31:41 +00002457
2458 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002459
Sadik Armagand109a4d2020-07-28 10:42:13 +01002460 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002461 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2462
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002463 // we need to add the activation layer and fortunately we don't need to care about the data layout
2464 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2465 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002466
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002467 // register the output connection slots for the layer, connections are made after all layers have been created
2468 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2469 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2470}
2471
Kevin May7d96b162021-02-03 17:38:41 +00002472void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
keidav011b3e2ea2019-02-21 10:07:37 +00002473{
2474 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2475
2476 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2477
2478 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2479 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2480 CHECK_VALID_SIZE(outputs.size(), 4);
2481
2482 // Obtain custom options from flexbuffers
2483 auto custom_options = operatorPtr->custom_options;
2484 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
2485
2486 // Obtain descriptor information from tf lite
2487 DetectionPostProcessDescriptor desc;
2488 desc.m_MaxDetections = m["max_detections"].AsUInt32();
2489 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
2490 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
2491 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
2492 desc.m_NumClasses = m["num_classes"].AsUInt32();
2493 desc.m_ScaleH = m["h_scale"].AsFloat();
2494 desc.m_ScaleW = m["w_scale"].AsFloat();
2495 desc.m_ScaleX = m["x_scale"].AsFloat();
2496 desc.m_ScaleY = m["y_scale"].AsFloat();
2497
keidav0107d58c72019-02-26 11:57:39 +00002498 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00002499 {
keidav0107d58c72019-02-26 11:57:39 +00002500 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00002501 }
2502 if (!(m["detections_per_class"].IsNull()))
2503 {
2504 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
2505 }
2506
2507 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
2508 {
2509 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
2510 "must be positive and less than or equal to 1.");
2511 }
2512
2513 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
Finn Williamsd4fa5452021-03-01 12:31:41 +00002514 auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);
keidav011b3e2ea2019-02-21 10:07:37 +00002515
James Ward58dec6b2020-09-11 17:32:44 +01002516 auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsd4fa5452021-03-01 12:31:41 +00002517 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
keidav011b3e2ea2019-02-21 10:07:37 +00002518 layerName.c_str());
2519
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002520 ARMNN_ASSERT(layer != nullptr);
keidav011b3e2ea2019-02-21 10:07:37 +00002521
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002522 // The model does not specify the output shapes.
2523 // The output shapes are calculated from the max_detection and max_classes_per_detection.
2524 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
2525 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2526 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2527 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2528 m_OverridenOutputShapes.push_back({ 1 });
2529
keidav011b3e2ea2019-02-21 10:07:37 +00002530 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
2531 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002532 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00002533 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
2534 }
2535
2536 // Register the input connection slots for the layer, connections are made after all layers have been created
2537 // only the tensors for the inputs are relevant, exclude the const tensors
2538 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2539 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2540
2541 // Register the output connection slots for the layer, connections are made after all layers have been created
2542 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2543 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2544 outputTensorIndexes[1],
2545 outputTensorIndexes[2],
2546 outputTensorIndexes[3]});
2547}
2548
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002549/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00002550void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002551{
2552 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2553
2554 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2555 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2556 CHECK_VALID_SIZE(outputs.size(), 1);
2557
2558 if (inputs.size() < 1)
2559 {
2560 throw ParseException("Pack must have at least one input.");
2561 }
2562
2563 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2564 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2565
2566 StackDescriptor desc;
2567 desc.m_Axis = static_cast<uint32_t>(options->axis);
2568 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2569
2570 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2571 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2572 desc.m_InputShape = inputTensorInfo.GetShape();
2573
James Ward58dec6b2020-09-11 17:32:44 +01002574 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002575 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2576
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002577 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002578
Sadik Armagand109a4d2020-07-28 10:42:13 +01002579 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002580 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2581
2582 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2583 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2584
2585 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2586 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2587}
2588
Kevin May7d96b162021-02-03 17:38:41 +00002589void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01002590{
2591 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2592
2593 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2594 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2595
2596 // This unpackAxis indicates the axis to unpack
2597 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2598
2599 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2600 CHECK_VALID_SIZE(inputs.size(), 1);
2601
2602 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002603
2604 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2605 {
2606 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002607 fmt::format("The unpack axis: {} cannot be greater than or equal to "
2608 "the number of input dimension {} {}",
2609 unpackAxis,
2610 inputTensorInfo.GetNumDimensions(),
2611 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002612 }
2613
Nina Drozd200e3802019-04-15 09:47:39 +01002614 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2615 // If num is not defined, automatically infer from the length of the dimension axis.
2616 if(unpackNum == 0)
2617 {
2618 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2619 }
2620
2621 // If unpack number cannot be inferred and is still zero, throw ParseException.
2622 if(unpackNum == 0)
2623 {
2624 throw ParseException("Number to unpack must greater than zero.");
2625 }
2626
2627 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2628 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2629
2630 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2631 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2632
2633 // Add current input shape to unpackDimSizes
2634 for (unsigned int i = 0; i < inputDimSize; ++i)
2635 {
2636 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2637 }
2638
2639 if (unpackDimSizes[unpackAxis] != unpackNum)
2640 {
2641 throw ParseException("Number to unpack must be the same as length of the dimension to "
2642 "unpack along.");
2643 }
2644
2645 unpackDimSizes[unpackAxis] /= unpackNum;
2646
2647 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2648 for (unsigned int j = 0; j < unpackNum; ++j)
2649 {
2650 // Set the size of the views.
2651 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2652 {
2653 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2654 }
2655 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2656 }
2657
James Ward58dec6b2020-09-11 17:32:44 +01002658 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01002659 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002660 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01002661
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002662 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2663 unpackDimSizes.data());
2664
Nina Drozd200e3802019-04-15 09:47:39 +01002665 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2666 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2667
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002668 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2669 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2670 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002671 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01002672 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002673 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002674 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002675 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2676
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002677 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2678 outputTensorInfo.GetDataType(),
2679 outputTensorInfo.GetQuantizationScale(),
2680 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002681 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2682
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002683 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002684
2685 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2686 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2687 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2688 }
Nina Drozd200e3802019-04-15 09:47:39 +01002689}
2690
Kevin May7d96b162021-02-03 17:38:41 +00002691void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd0324f482019-04-08 10:52:10 +01002692{
2693 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2694
2695 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2696 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2697
2698 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2699
Nina Drozd200e3802019-04-15 09:47:39 +01002700 // If number of splits cannot be inferred and is zero, throw ParseException.
2701 if(numSplits == 0)
2702 {
2703 throw ParseException("Number to splits must greater than zero.");
2704 }
2705
Nina Drozd0324f482019-04-08 10:52:10 +01002706 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2707 CHECK_VALID_SIZE(inputs.size(), 2);
2708 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2709 CHECK_VALID_SIZE(outputs.size(), numSplits);
2710
Matthew Sloyaned7fce42021-04-15 20:46:24 +01002711 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2712 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
2713 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
Nina Drozd0324f482019-04-08 10:52:10 +01002714
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002715 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01002716 if (axisBufferPtr == nullptr)
2717 {
2718 throw ParseException(
2719 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
2720 CHECK_LOCATION().AsString()));
2721 }
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002722
Matthew Sloyaned7fce42021-04-15 20:46:24 +01002723 std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
2724 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2725 int32_t axis = axisData[0];
2726
2727 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
2728 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
2729 {
2730 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
2731 // E.g. Rank 4 tensor can have axis in range [-4, 3)
2732 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
2733 throw ParseException(
2734 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
2735 axis,
2736 CHECK_LOCATION().AsString()));
2737 }
2738
2739 const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
Nina Drozd0324f482019-04-08 10:52:10 +01002740
Nina Drozd0324f482019-04-08 10:52:10 +01002741 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002742 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002743 {
2744 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002745 fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
2746 inputTensorInfo.GetNumDimensions(),
2747 MaxNumOfTensorDimensions,
2748 CHECK_LOCATION().AsString()));
Nina Drozd0324f482019-04-08 10:52:10 +01002749 }
2750
2751 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2752
2753 // Add current input shape to splitterDimSizes
2754 for (unsigned int i = 0; i < inputDimSize; ++i)
2755 {
2756 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2757 }
2758
2759 if (splitterDimSizes[splitDim] % numSplits != 0)
2760 {
2761 throw ParseException("Number of splits must evenly divide the dimension");
2762 }
2763 splitterDimSizes[splitDim] /= numSplits;
2764
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002765 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002766 for (unsigned int j = 0; j < numSplits; ++j)
2767 {
2768 // Set the size of the views.
2769 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2770 {
2771 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2772 }
2773 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2774 }
2775
James Ward58dec6b2020-09-11 17:32:44 +01002776 auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd0324f482019-04-08 10:52:10 +01002777 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002778 ARMNN_ASSERT(layer != nullptr);
Nina Drozd0324f482019-04-08 10:52:10 +01002779
2780 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002781 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002782
Nina Drozd0324f482019-04-08 10:52:10 +01002783 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2784 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002785 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01002786 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01002787 }
2788
2789 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2790 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2791}
2792
Derek Lambertif0176992020-04-28 13:37:49 +01002793unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
2794{
2795 int numDims = armnn::numeric_cast<int>(numDimsIn);
2796 int v = idx < 0 ? numDims + idx : idx;
2797 ARMNN_ASSERT(v >= 0);
2798 ARMNN_ASSERT(v < numDims);
2799
2800 return static_cast<unsigned int>(v);
2801}
2802
Kevin May7d96b162021-02-03 17:38:41 +00002803void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01002804{
2805 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2806
2807 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Ryan OShea86704732020-05-26 11:41:04 +01002808 const auto * options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01002809
2810 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2811 CHECK_VALID_SIZE(inputs.size(), 3);
2812
2813 auto& inputTensor = inputs[0];
2814 auto& splitsTensor = inputs[1];
2815 auto& axisTensor = inputs[2];
2816
2817 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
2818 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
2819 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
2820 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
2821
2822 // Inputs
2823 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2824 if (inputDimSize > MaxNumOfTensorDimensions)
2825 {
2826 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002827 fmt::format("The number of dimensions: {} for input tensors of the "
2828 "SplitV op cannot be greater than {} {}",
2829 inputTensorInfo.GetNumDimensions(),
2830 MaxNumOfTensorDimensions,
2831 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01002832 }
2833
2834 // Get split axis
2835 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01002836 if (axisBufferPtr == nullptr)
2837 {
2838 throw ParseException(
2839 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
2840 CHECK_LOCATION().AsString()));
2841 }
2842
Derek Lambertif0176992020-04-28 13:37:49 +01002843 std::vector<int> axisData(axisTensorInfo.GetNumElements());
2844 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
Matthew Sloyaned7fce42021-04-15 20:46:24 +01002845 int32_t axis = axisData[0];
2846
2847 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
2848 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
2849 {
2850 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
2851 // E.g. Rank 4 tensor can have axis in range [-4, 3)
2852 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
2853 throw ParseException(
2854 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
2855 axis,
2856 CHECK_LOCATION().AsString()));
2857 }
2858 const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
Derek Lambertif0176992020-04-28 13:37:49 +01002859
Derek Lambertif0176992020-04-28 13:37:49 +01002860 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01002861 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01002862 unsigned int numSplits{0};
2863
2864 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01002865 {
2866 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01002867 }
2868 else
2869 {
Ryan OShea86704732020-05-26 11:41:04 +01002870 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01002871 }
2872
2873 if (numSplits <=0)
2874 {
2875 throw ParseException("SplitV has invalid number of splits");
2876 }
2877
Jan Eilersc0761e92020-06-29 16:48:44 +01002878 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01002879 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01002880 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01002881
Jan Eilersc0761e92020-06-29 16:48:44 +01002882 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01002883 int numInferred{0};
2884 unsigned int inferIdx{0};
2885 int splitSum{0};
2886 for (auto split : splitsData)
2887 {
2888 if (split < 0)
2889 {
2890 numInferred++;
2891 inferIdx = idx;
2892 }
2893 else
2894 {
2895 splitSum += split;
2896 }
2897 idx++;
2898 }
2899 // Check for inferred Axis
2900 if (numInferred == 0)
2901 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002902 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01002903 {
2904 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
2905 }
2906 }
2907 else if (numInferred == 1)
2908 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002909 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01002910 }
2911 else
2912 {
2913 throw ParseException("Cannot infer split size for more than one split");
2914 }
2915
Derek Lambertif0176992020-04-28 13:37:49 +01002916 //Ouput size validation
2917 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2918 CHECK_VALID_SIZE(outputs.size(), numSplits);
2919
2920 // Setup Armnn descriptor
2921 SplitterDescriptor splitDesc(numSplits, inputDimSize);
2922 unsigned int accumSplit = 0;
2923 for (unsigned int j = 0; j < numSplits; ++j)
2924 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002925 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01002926
2927 // Set the size of the views.
2928 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
2929 {
2930 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
2931 if (dimIdx == splitDim)
2932 {
2933 dimSize = splitSize;
2934 }
2935 splitDesc.SetViewSize(j, dimIdx, dimSize);
2936 }
2937
2938 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
2939 accumSplit += splitSize;
2940 }
2941
James Ward58dec6b2020-09-11 17:32:44 +01002942 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01002943 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002944 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01002945
2946 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2947 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2948
2949 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2950 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002951 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01002952 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
2953 }
2954
2955 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2956 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2957}
2958
Matthew Sloyan28f177c2021-04-09 14:38:52 +01002959void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
2960{
2961 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
2962}
2963
Kevin May7d96b162021-02-03 17:38:41 +00002964void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
Inki Daed4619e22020-09-10 15:33:54 +09002965{
Matthew Sloyan28f177c2021-04-09 14:38:52 +01002966 ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
2967}
2968
/// Shared handler for the TfLite ARG_MIN / ARG_MAX operators.
/// Operator inputs: [0] = data tensor, [1] = axis (scalar constant).
/// The output must be Signed32 or Signed64; argMinMaxFunction selects Min or Max.
void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);

    // Check if output tensor type is Signed32 or Signed64
    if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
        outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
    {
        throw ParseException(
            fmt::format(
                "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
                CHECK_LOCATION().AsString()));
    }

    // Get const axis value from model and set it to descriptor.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData.front();

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    ArgMinMaxDescriptor desc;
    desc.m_Axis = axis;
    desc.m_Function = argMinMaxFunction;

    // Register a ArgMin/ArgMax layer.
    auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
    auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
    IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
3037
Kevin May7d96b162021-02-03 17:38:41 +00003038void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00003039{
3040 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3041
Kevin May7d96b162021-02-03 17:38:41 +00003042 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003043 CHECK_VALID_SIZE(inputs.size(), 2);
Kevin May7d96b162021-02-03 17:38:41 +00003044 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003045 CHECK_VALID_SIZE(outputs.size(), 1);
3046
3047 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3048 armnn::TensorInfo indicesTensorInfo = ToTensorInfo(inputs[1]);
3049 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3050
3051 armnn::GatherDescriptor gatherDescriptor;
3052
3053 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3054 const auto * options = operatorPtr->builtin_options.AsGatherOptions();
3055 auto axis = options->axis;
3056
3057 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
3058 auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
3059 auto outputDimensions = outputTensorInfo.GetNumDimensions();
3060 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
3061 {
3062 throw ParseException(
3063 fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
3064 axis,
3065 inputDimensions, inputDimensions,
3066 CHECK_LOCATION().AsString()));
3067 }
3068 if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
3069 {
3070 throw ParseException(
3071 fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
3072 outputDimensions,
3073 inputDimensions, indicesDimensions,
3074 CHECK_LOCATION().AsString()));
3075 }
3076
3077 gatherDescriptor.m_Axis = axis;
3078
3079 auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);
3080 IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
3081 ARMNN_ASSERT(layer != nullptr);
3082 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3083
3084 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3085 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
3086
3087 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3088 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3089}
3090
Kevin May7d96b162021-02-03 17:38:41 +00003091void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00003092{
3093 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3094
Kevin May7d96b162021-02-03 17:38:41 +00003095 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003096 CHECK_VALID_SIZE(inputs.size(), 1);
Kevin May7d96b162021-02-03 17:38:41 +00003097 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003098 CHECK_VALID_SIZE(outputs.size(), 1);
3099
3100 armnn::DepthToSpaceDescriptor descriptor;
3101
3102 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3103 const auto * options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
3104 auto blockSize = options->block_size;
3105 if (blockSize < 2)
3106 {
3107 throw ParseException(
3108 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
3109 blockSize,
3110 CHECK_LOCATION().AsString()));
3111 }
3112 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
3113
3114 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
3115 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
3116 ARMNN_ASSERT(layer != nullptr);
3117 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3118 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3119
3120 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3121 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3122
3123 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3124 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3125}
3126
Kevin May7d96b162021-02-03 17:38:41 +00003127void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003128{
Sadik Armagana2747482021-02-09 10:28:54 +00003129 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
3130}
3131
3132void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
3133{
3134 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
3135}
3136
3137void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
3138{
3139 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
3140}
3141
3142void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
3143{
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003144 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3145
3146 const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3147 const auto *options = operatorPtr->builtin_options.AsReducerOptions();
3148
3149 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3150 CHECK_VALID_SIZE(inputs.size(), 2);
3151
3152 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3153 CHECK_VALID_SIZE(outputs.size(), 1);
3154
Sadik Armagana2747482021-02-09 10:28:54 +00003155 auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003156
3157 armnn::TensorInfo inputTensorInfo0 = ToTensorInfo(inputs[0]);
3158 armnn::TensorInfo inputTensorInfo1 = ToTensorInfo(inputs[1]);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003159
3160 ReduceDescriptor desc;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003161 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3162 // Get const axis value from model and set it to descriptor.
3163 if (axisBufferPtr != nullptr)
3164 {
Sadik Armagan49bdb792021-02-11 13:57:07 +00003165 std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
3166 ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());
3167
3168 // Convert the axis to unsigned int and remove duplicates.
3169 auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
3170 std::set<unsigned int> uniqueAxis;
3171 std::transform(axisData.begin(),
3172 axisData.end(),
3173 std::inserter(uniqueAxis, uniqueAxis.begin()),
3174 [rank](int i)->unsigned int{
3175 return static_cast<uint32_t>(((i + rank) % rank)); });
3176 desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003177 }
Sadik Armagana2747482021-02-09 10:28:54 +00003178 else
3179 {
3180 for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
3181 {
3182 desc.m_vAxis.push_back(i);
3183 }
3184 }
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003185
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003186 desc.m_KeepDims = options->keep_dims;
Sadik Armagana2747482021-02-09 10:28:54 +00003187 desc.m_ReduceOperation = reduceOperation;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003188
3189 // Register a new layer object, Sum.
3190 IConnectableLayer *layer = m_Network->AddReduceLayer(desc, layerName.c_str());
3191
3192 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3193 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3194
3195 // Register input tensor to the layer.
3196 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3197 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3198
3199 // Register output tensor to the layer.
3200 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3201 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3202}
3203
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003204void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
3205{
3206 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
3207}
3208
3209void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
3210{
3211 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
3212}
3213
3214void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
3215{
3216 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
3217}
3218
3219void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
3220{
3221 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
3222}
3223
3224void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
3225{
3226 ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
3227}
3228
3229void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
3230{
3231 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3232
3233 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3234 CHECK_VALID_SIZE(inputs.size(), 1);
3235
3236 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3237 CHECK_VALID_SIZE(outputs.size(), 1);
3238
3239 std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
3240 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
3241
3242 ElementwiseUnaryDescriptor desc;
3243 desc.m_Operation = unaryOperation;
3244 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
3245 ARMNN_ASSERT(layer != nullptr);
3246
3247 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3248 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3249
3250 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3251 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3252
3253 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3254 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3255}
3256
Kevin May7d96b162021-02-03 17:38:41 +00003257armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
3258 unsigned int outputSlot,
3259 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01003260{
3261 ActivationDescriptor activationDesc;
3262 std::string layerName = prevLayer->GetName();
3263
3264 switch(activationType)
3265 {
3266 case tflite::ActivationFunctionType_NONE:
3267 {
3268 // this is a no-op: return previous layer
3269 return prevLayer;
3270 }
3271 case tflite::ActivationFunctionType_RELU:
3272 {
3273 activationDesc.m_Function = ActivationFunction::ReLu;
3274 layerName += ":RELU";
3275 break;
3276 }
3277 case tflite::ActivationFunctionType_RELU6:
3278 {
3279 activationDesc.m_Function = ActivationFunction::BoundedReLu;
3280 activationDesc.m_A = 6.0f;
3281 activationDesc.m_B = 0.0f;
3282 layerName += ":RELU6";
3283 break;
3284 }
3285 case tflite::ActivationFunctionType_TANH:
3286 {
3287 activationDesc.m_Function = ActivationFunction::TanH;
3288 activationDesc.m_A = 1.0f;
3289 activationDesc.m_B = 1.0f;
3290 layerName += ":TANH";
3291 break;
3292 }
3293
3294 // I only put these here as a reminder what others we could support
3295 case tflite::ActivationFunctionType_RELU_N1_TO_1:
3296 case tflite::ActivationFunctionType_SIGN_BIT:
3297 default:
3298 {
3299 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003300 fmt::format("TfLite parser doesn't suppport fused activation: "
3301 "{}/{} {} ",
3302 activationType,
3303 tflite::EnumNameActivationFunctionType(activationType),
3304 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003305
3306 }
3307 }
3308
3309 IConnectableLayer* activationLayer =
3310 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
3311
3312 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
3313 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
3314 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
3315 return activationLayer;
3316}
3317
Kevin May7d96b162021-02-03 17:38:41 +00003318TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char * fileName)
telsoa01c577f2c2018-08-31 09:22:23 +01003319{
3320 if (fileName == nullptr)
3321 {
James Ward58dec6b2020-09-11 17:32:44 +01003322 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003323 CHECK_LOCATION().AsString()));
3324 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01003325 std::error_code errorCode;
3326 fs::path pathToFile(fileName);
3327 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01003328 {
James Ward58dec6b2020-09-11 17:32:44 +01003329 //fmt::format() could not be used here (format error)
3330 std::stringstream msg;
3331 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
3332 << " " << CHECK_LOCATION().AsString();
3333
3334 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01003335 }
3336 std::ifstream file(fileName, std::ios::binary);
3337 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3338 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3339 fileContent.size());
3340}
3341
Kevin May7d96b162021-02-03 17:38:41 +00003342TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
telsoa01c577f2c2018-08-31 09:22:23 +01003343{
3344 if (binaryContent == nullptr)
3345 {
James Ward58dec6b2020-09-11 17:32:44 +01003346 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003347 CHECK_LOCATION().AsString()));
3348 }
3349 flatbuffers::Verifier verifier(binaryContent, len);
3350 if (verifier.VerifyBuffer<tflite::Model>() == false)
3351 {
3352 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003353 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
3354 "flatbuffers format. size:{} {}",
3355 len,
3356 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003357 }
3358 return tflite::UnPackModel(binaryContent);
3359}
3360
Kevin May7d96b162021-02-03 17:38:41 +00003361TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr & model,
3362 size_t subgraphIndex,
3363 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003364{
3365 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3366
Derek Lambertiff05cc52019-04-26 13:05:17 +01003367 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3368 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003369
3370 size_t inputCount = operatorPtr->inputs.size();
3371 TensorRawPtrVector result(inputCount);
3372 for (size_t i=0; i<inputCount; ++i)
3373 {
3374 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003375 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003376 }
3377 return result;
3378}
3379
Kevin May7d96b162021-02-03 17:38:41 +00003380TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr & model,
3381 size_t subgraphIndex,
3382 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003383{
3384 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3385
Derek Lambertiff05cc52019-04-26 13:05:17 +01003386 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3387 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003388
3389 size_t outputCount = operatorPtr->outputs.size();
3390 TensorRawPtrVector result(outputCount);
3391 for (size_t i=0; i<outputCount; ++i)
3392 {
3393 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
3394 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003395 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003396 }
3397 return result;
3398}
3399
Kevin May7d96b162021-02-03 17:38:41 +00003400TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr & model,
3401 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003402{
3403 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003404 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003405
Derek Lambertiff05cc52019-04-26 13:05:17 +01003406 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003407 TensorIdRawPtrVector result(inputCount);
3408 for (size_t i=0; i<inputCount; ++i)
3409 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003410 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01003411 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003412 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003413 }
3414 return result;
3415}
3416
Kevin May7d96b162021-02-03 17:38:41 +00003417TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr & model,
3418 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003419{
3420 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003421 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003422
Derek Lambertiff05cc52019-04-26 13:05:17 +01003423 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003424 TensorIdRawPtrVector result(outputCount);
3425 for (size_t i=0; i<outputCount; ++i)
3426 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003427 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3428 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003429 }
3430 return result;
3431}
3432
Kevin May7d96b162021-02-03 17:38:41 +00003433std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
3434 size_t subgraphIndex,
3435 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003436{
3437 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003438 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3439 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003440 return operatorPtr->inputs;
3441}
3442
Kevin May7d96b162021-02-03 17:38:41 +00003443std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
3444 size_t subgraphIndex,
3445 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003446{
3447 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003448 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3449 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003450 return operatorPtr->outputs;
3451}
3452
Kevin May7d96b162021-02-03 17:38:41 +00003453void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
3454 size_t operatorIndex,
3455 IConnectableLayer* layer,
Finn Williamsd4fa5452021-03-01 12:31:41 +00003456 const std::vector<unsigned int>& tensorIndexes,
3457 unsigned int startingSlotIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003458{
3459 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003460 ARMNN_ASSERT(layer != nullptr);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003461 if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
telsoa01c577f2c2018-08-31 09:22:23 +01003462 {
3463 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003464 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
3465 " for subgraph:{} operator index:{} {}",
3466 tensorIndexes.size(),
3467 layer->GetNumInputSlots(),
3468 subgraphIndex,
3469 operatorIndex,
3470 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003471 }
3472
Finn Williamsd4fa5452021-03-01 12:31:41 +00003473 for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
telsoa01c577f2c2018-08-31 09:22:23 +01003474 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00003475 unsigned int tensorIndex = tensorIndexes[index];
3476 armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
telsoa01c577f2c2018-08-31 09:22:23 +01003477 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
3478 }
3479}
3480
Kevin May7d96b162021-02-03 17:38:41 +00003481void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
3482 size_t operatorIndex,
3483 IConnectableLayer* layer,
3484 const std::vector<unsigned int>& tensorIndexes)
telsoa01c577f2c2018-08-31 09:22:23 +01003485{
3486 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003487 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003488 if (tensorIndexes.size() != layer->GetNumOutputSlots())
3489 {
3490 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003491 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
3492 " for subgraph:{} operator index:{} {}",
3493 tensorIndexes.size(),
3494 layer->GetNumOutputSlots(),
3495 subgraphIndex,
3496 operatorIndex,
3497 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003498 }
3499
3500 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
3501 {
3502 unsigned int tensorIndex = tensorIndexes[slotIndex];
3503 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
3504 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3505 }
3506}
3507
Kevin May7d96b162021-02-03 17:38:41 +00003508void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003509{
3510 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3511
3512 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
3513 for (auto const & tensorIdAndPtr : inputs)
3514 {
3515 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3516 IConnectableLayer* layer =
3517 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3518
3519 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
3520 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3521
3522 RegisterOutputSlots(subgraphIndex,
3523 VIRTUAL_OPERATOR_ID,
3524 layer,
3525 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3526 }
3527}
3528
Kevin May7d96b162021-02-03 17:38:41 +00003529void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003530{
3531 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3532
3533 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
3534 for (auto const & tensorIdAndPtr : outputs)
3535 {
3536 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3537 IConnectableLayer* layer =
3538 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3539
3540 RegisterInputSlots(subgraphIndex,
3541 VIRTUAL_OPERATOR_ID,
3542 layer,
3543 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3544 }
3545}
3546
Kevin May7d96b162021-02-03 17:38:41 +00003547void TfLiteParserImpl::SetupConstantLayers(size_t subgraphIndex)
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003548{
3549 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3550
Derek Lambertiff05cc52019-04-26 13:05:17 +01003551 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003552 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
3553 {
3554 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
3555 {
3556 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
3557 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
3558 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003559 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003560 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003561 auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003562
James Ward58dec6b2020-09-11 17:32:44 +01003563 std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003564 IConnectableLayer *layer =
Finn Williamsd4fa5452021-03-01 12:31:41 +00003565 m_Network->AddConstantLayer(tensorAndData, layerName.c_str());
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003566
3567 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3568 RegisterOutputSlots(subgraphIndex,
3569 VIRTUAL_OPERATOR_ID,
3570 layer,
3571 { tensorIndex });
3572
3573 }
3574 }
3575 }
3576}
3577
telsoa01c577f2c2018-08-31 09:22:23 +01003578// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Kevin May7d96b162021-02-03 17:38:41 +00003579TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003580{
3581 CHECK_BUFFER(model, bufferIndex);
3582 return model->buffers[bufferIndex].get();
3583}
3584
Matteo Martincigh747ef822018-12-18 09:26:39 +00003585template<typename T>
Kevin May7d96b162021-02-03 17:38:41 +00003586std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
3587TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
3588 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00003589 armnn::TensorInfo& tensorInfo,
3590 armnn::Optional<armnn::PermutationVector&> permutationVector)
3591{
3592 auto constData = CreateConstTensorImpl<T>(bufferPtr,
3593 tensorPtr,
3594 tensorInfo,
3595 permutationVector);
Kevin May7d96b162021-02-03 17:38:41 +00003596 TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
Matteo Martincigh747ef822018-12-18 09:26:39 +00003597 return std::make_pair(constData.first, std::move(storage));
3598}
3599
Finn Williamsd4fa5452021-03-01 12:31:41 +00003600bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
3601{
3602 CHECK_TENSOR_PTR(tensorPtr);
3603 return !tensorPtr->is_variable;
3604}
3605
3606
Kevin May7d96b162021-02-03 17:38:41 +00003607std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
Finn Williamsd4fa5452021-03-01 12:31:41 +00003608TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
3609 armnn::TensorInfo& tensorInfo,
3610 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01003611{
3612 CHECK_TENSOR_PTR(tensorPtr);
3613 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3614 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3615
3616 switch (tensorInfo.GetDataType())
3617 {
3618 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003619 return CreateConstTensorAndStoreData<float>(bufferPtr,
3620 tensorPtr,
3621 tensorInfo,
3622 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00003623 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003624 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
3625 tensorPtr,
3626 tensorInfo,
3627 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00003628 case armnn::DataType::QSymmS8:
3629 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3630 tensorPtr,
3631 tensorInfo,
3632 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00003633 case armnn::DataType::QAsymmS8:
3634 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3635 tensorPtr,
3636 tensorInfo,
3637 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003638 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003639 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
3640 tensorPtr,
3641 tensorInfo,
3642 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003643 default:
3644 {
3645 std::stringstream errString;
3646 errString << "Unexpected datatype when creating const tensor: "
3647 << armnn::GetDataTypeName(tensorInfo.GetDataType())
3648 << " shape:" << tensorInfo.GetShape()
3649 << CHECK_LOCATION().AsString();
3650 throw ParseException(errString.str());
3651 }
3652 }
3653}
3654
Finn Williamsd4fa5452021-03-01 12:31:41 +00003655armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
3656 armnn::TensorInfo& tensorInfo)
3657{
3658 CHECK_TENSOR_PTR(tensorPtr);
3659 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3660 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3661
3662 return ConstTensor(tensorInfo, bufferPtr->data.data());
3663}
3664
Kevin May7d96b162021-02-03 17:38:41 +00003665BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
3666 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01003667{
3668 CHECK_SUBGRAPH(m_Model, subgraphId);
3669 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3670 for (auto const & input : inputs)
3671 {
3672 if (input.second->name == name)
3673 {
3674 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3675 return std::make_pair(bindingId, ToTensorInfo(input.second));
3676 }
3677 }
3678
3679 std::stringstream bindings;
3680 for (auto const & input : inputs)
3681 {
3682 bindings << "'" << input.second->name << "' ";
3683 }
3684
3685 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003686 fmt::format("No input binding found for subgraph:{} and name:{}. "
3687 "Possible inputs are: [{}] {}",
3688 subgraphId,
3689 name,
3690 bindings.str(),
3691 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003692}
3693
Kevin May7d96b162021-02-03 17:38:41 +00003694BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
3695 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01003696{
3697 CHECK_SUBGRAPH(m_Model, subgraphId);
3698 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003699 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01003700 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003701 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01003702 if (output.second->name == name)
3703 {
3704 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003705 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3706 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3707 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01003708 }
3709 }
3710
3711 std::stringstream bindings;
3712 for (auto const & output : outputs)
3713 {
3714 bindings << "'" << output.second->name << "' ";
3715 }
3716
3717 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003718 fmt::format("No output binding found for subgraph:{} and name:{}. "
3719 "Possible outputs are: [{}] {}",
3720 subgraphId,
3721 name,
3722 bindings.str(),
3723 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003724}
3725
Kevin May7d96b162021-02-03 17:38:41 +00003726size_t TfLiteParserImpl::GetSubgraphCount() const
telsoa01c577f2c2018-08-31 09:22:23 +01003727{
3728 return m_Model->subgraphs.size();
3729}
3730
Kevin May7d96b162021-02-03 17:38:41 +00003731std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01003732{
3733 CHECK_SUBGRAPH(m_Model, subgraphId);
3734 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3735 std::vector<std::string> result;
3736 result.reserve(inputs.size());
3737 for (auto const & input : inputs)
3738 {
3739 result.push_back(input.second->name);
3740 }
3741 return result;
3742}
3743
Kevin May7d96b162021-02-03 17:38:41 +00003744std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01003745{
3746 CHECK_SUBGRAPH(m_Model, subgraphId);
3747 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3748 std::vector<std::string> result;
3749 result.reserve(outputs.size());
3750 for (auto const & output : outputs)
3751 {
3752 result.push_back(output.second->name);
3753 }
3754 return result;
3755}
3756
Matthew Sloyanac001ee2021-02-03 10:43:04 +00003757const std::string TfLiteParserImpl::GetVersion()
3758{
3759 return TFLITE_PARSER_VERSION;
3760}
3761
Kevin May7d96b162021-02-03 17:38:41 +00003762TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
telsoa01c577f2c2018-08-31 09:22:23 +01003763: m_FloatData(std::move(data))
3764, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003765, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003766, m_Int32Data(nullptr)
3767{
3768}
3769
Kevin May7d96b162021-02-03 17:38:41 +00003770TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
telsoa01c577f2c2018-08-31 09:22:23 +01003771: m_FloatData(nullptr)
3772, m_Uint8Data(std::move(data))
Keith Davisd305e1a2020-01-22 11:57:54 +00003773, m_Int8Data(nullptr)
3774, m_Int32Data(nullptr)
3775{
3776}
3777
Kevin May7d96b162021-02-03 17:38:41 +00003778TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
Keith Davisd305e1a2020-01-22 11:57:54 +00003779: m_FloatData(nullptr)
3780, m_Uint8Data(nullptr)
3781, m_Int8Data(std::move(data))
telsoa01c577f2c2018-08-31 09:22:23 +01003782, m_Int32Data(nullptr)
3783{
3784}
3785
Kevin May7d96b162021-02-03 17:38:41 +00003786TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
telsoa01c577f2c2018-08-31 09:22:23 +01003787: m_FloatData(nullptr)
3788, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003789, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003790, m_Int32Data(std::move(data))
3791{
3792}
3793
3794} // armnnTfLiteParser