blob: 68dbbd120fbc8c33382096f05391b1fd38e2c9f1 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kellyc5789ca2020-07-06 19:24:15 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfLiteParser/Version.hpp"
9
Sadik Armagand109a4d2020-07-28 10:42:13 +010010#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +000011#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010012#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000013#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010014#include <armnn/Tensor.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000015#include <armnnUtils/TensorUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010016#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010017#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000018#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010019#include <armnn/utility/NumericCast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010020
21// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000022#include <armnnUtils/Permute.hpp>
Rob Hughes9542f902021-07-14 09:48:54 +010023#include <armnnUtils/Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000024
Sadik Armagan479045b2018-10-01 11:51:37 +010025#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026#include <VerificationHelpers.hpp>
27
28// The generated code based on the Tf Lite schema:
29#include <schema_generated.h>
30
Matteo Martincighe011d202019-11-28 11:35:47 +000031#include <flatbuffers/flexbuffers.h>
32
James Ward58dec6b2020-09-11 17:32:44 +010033#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010034
Jim Flynnfca233e2021-09-23 12:16:53 +010035#include <tensorflow/lite/version.h>
36
telsoa01c577f2c2018-08-31 09:22:23 +010037#include <algorithm>
Matthew Sloyanac001ee2021-02-03 10:43:04 +000038#include <fstream>
39#include <iostream>
telsoa01c577f2c2018-08-31 09:22:23 +010040#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010041#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000042#include <sstream>
43
44#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
45 { \
46 throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
47 << ": " \
48 << CHECK_LOCATION().AsString()).str()); \
49 }
telsoa01c577f2c2018-08-31 09:22:23 +010050
51using namespace armnn;
52using armnn::CheckLocation;
53namespace armnnTfLiteParser
54{
Kevin May7d96b162021-02-03 17:38:41 +000055
// ITfLiteParser is a thin pimpl facade: every public call is forwarded to the
// TfLiteParserImpl instance owned via pTfLiteParserImpl.
ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
    pTfLiteParserImpl(new TfLiteParserImpl(options)) {}

ITfLiteParser::~ITfLiteParser() = default;

// Returns a raw owning pointer; callers must release it with ITfLiteParser::Destroy.
ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
{
    return new ITfLiteParser(options);
}

// Preferred factory: wraps the raw parser in a unique_ptr-like handle whose
// deleter is ITfLiteParser::Destroy, so cleanup is automatic.
ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
{
    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
}

// Counterpart to CreateRaw; deletes the parser instance.
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
75
// The wrappers below simply delegate to the implementation object; see
// TfLiteParserImpl for the actual parsing logic.

// Parses a .tflite flatbuffer file from disk into an armnn::INetwork.
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
}

// Parses an in-memory .tflite flatbuffer into an armnn::INetwork.
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
}

// Looks up the binding info (binding id + tensor info) for a named input
// tensor of the given subgraph.
BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
}

// Looks up the binding info for a named output tensor of the given subgraph.
BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                            const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
}

// Number of subgraphs in the most recently parsed model.
size_t ITfLiteParser::GetSubgraphCount() const
{
    return pTfLiteParserImpl->GetSubgraphCount();
}

// Names of the input tensors of the given subgraph.
std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
}

// Names of the output tensors of the given subgraph.
std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
}
112
telsoa01c577f2c2018-08-31 09:22:23 +0100113namespace
114{
jimfly01c25411c2018-11-14 17:47:22 +0000115
// Sentinel operator index used when a check runs outside the context of a
// concrete operator (CheckModel treats it as always valid).
const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
117
Kevin May7d96b162021-02-03 17:38:41 +0000118void CheckSubgraph(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100119 size_t subgraphIndex,
120 const CheckLocation & location)
121{
122 if (model.get() == nullptr)
123 {
124 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100125 fmt::format("{} was called with invalid (null) model. "
126 "Possible reason is that the model is not yet loaded and Unpack(ed). "
127 "subgraph:{} at {}",
128 location.m_Function,
129 subgraphIndex,
130 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100131 }
132 else if (subgraphIndex >= model->subgraphs.size())
133 {
134 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100135 fmt::format("{} was called with an invalid subgraph index. "
136 "subgraph:{} at {}",
137 location.m_Function,
138 subgraphIndex,
139 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100140 }
141}
142
143#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
144 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
145
Kevin May7d96b162021-02-03 17:38:41 +0000146void CheckModel(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100147 size_t subgraphIndex,
148 size_t operatorIndex,
149 const CheckLocation & location)
150{
151 if (model.get() == nullptr)
152 {
153 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100154 fmt::format("{} was called with invalid (null) model. "
155 "Possible reason is that the model is not yet loaded and Unpack(ed). "
156 "subgraph:{} operator:{} at {}",
157 location.m_Function,
158 subgraphIndex,
159 operatorIndex,
160 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100161 }
162 else if (subgraphIndex >= model->subgraphs.size())
163 {
164 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100165 fmt::format("{} was called with an invalid subgraph index. "
166 "subgraph:{} operator:{} at {}",
167 location.m_Function,
168 subgraphIndex,
169 operatorIndex,
170 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100171 }
172 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
173 operatorIndex != VIRTUAL_OPERATOR_ID)
174 {
175 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100176 fmt::format("{} was called with an invalid operator index. "
177 "subgraph:{} operator:{} at {}",
178 location.m_Function,
179 subgraphIndex,
180 operatorIndex,
181 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100182 }
183}
184
185#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
186 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
187
Kevin May7d96b162021-02-03 17:38:41 +0000188void CheckTensor(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100189 size_t subgraphIndex,
190 size_t tensorIndex,
191 const CheckLocation & location)
192{
193 // not checking model, because I assume CHECK_MODEL already run
194 // and checked that. An assert would do.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100195 ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
telsoa01c577f2c2018-08-31 09:22:23 +0100196
197 // also subgraph index should be checked by CHECK_MODEL so
198 // I only add an assert here
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100199 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
telsoa01c577f2c2018-08-31 09:22:23 +0100200
201 // the tensor index is the only one to check here
202 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
203 {
204 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100205 fmt::format("{} was called with an invalid tensor index. "
206 "subgraph:{} tensor:{} at {}",
207 location.m_Function,
208 subgraphIndex,
209 tensorIndex,
210 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100211 }
212}
213
214#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
215 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
216
Kevin May7d96b162021-02-03 17:38:41 +0000217void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100218 const CheckLocation & location)
219{
220 if (rawPtr == nullptr)
221 {
222 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100223 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100224 }
225}
226
227#define CHECK_TENSOR_PTR(TENSOR_PTR) \
228 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
229
Kevin May7d96b162021-02-03 17:38:41 +0000230void CheckBuffer(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100231 size_t bufferIndex,
232 const CheckLocation & location)
233{
234 if (model.get() == nullptr)
235 {
236 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100237 fmt::format("{} was called with invalid (null) model. "
238 "Possible reason is that the model is not yet loaded and Unpack(ed). "
239 "buffer:{} at {}",
240 location.m_Function,
241 bufferIndex,
242 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100243 }
244 else if (bufferIndex >= model->buffers.size())
245 {
246 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100247 fmt::format("{} was called with an invalid buffer index. "
248 "buffer index:{} at {}",
249 location.m_Function,
250 bufferIndex,
251 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100252 }
253 else if (model->buffers[bufferIndex].get() == nullptr)
254 {
255 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100256 fmt::format("The buffer #{} is null. {}",
257 bufferIndex,
258 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100259 }
260}
261
262#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
263 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
264
Kevin May7d96b162021-02-03 17:38:41 +0000265void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100266 const armnn::TensorInfo & tensorInfo,
267 uint32_t bufferId,
268 const CheckLocation & location)
269{
270 if (bufferPtr == nullptr)
271 {
272 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100273 fmt::format("BufferPtr is null for buffer:{}. {}",
274 bufferId,
275 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100276 }
277 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
278 tensorInfo.GetNumBytes() > bufferPtr->data.size())
279 {
280 std::stringstream ss;
281 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
282 << "For tensor: " << tensorInfo.GetShape()
283 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
284 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
285 throw ParseException(ss.str());
286 }
287}
288
289#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
290 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
291
292bool IsActivationSupported(tflite::ActivationFunctionType activationType)
293{
294 switch(activationType)
295 {
296 case tflite::ActivationFunctionType_NONE:
297 case tflite::ActivationFunctionType_RELU:
298 case tflite::ActivationFunctionType_RELU6:
299 case tflite::ActivationFunctionType_TANH:
300 {
301 return true;
302 }
303 default:
304 {
305 return false;
306 }
307 }
308}
309
// Throws armnn::ParseException when the operator's fused activation function
// is not one the parser supports (see IsActivationSupported).
// Fix: the error message previously read "doesn't suppport" (typo).
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                fmt::format("TfLite parser doesn't support fused activation: " \
                            "{}/{} in {} subgraph:{} operator:{} at {}", \
                            OPTION->fused_activation_function, \
                            tflite::EnumNameActivationFunctionType(\
                                OPTION->fused_activation_function), \
                            __func__, \
                            SUBGRAPH_INDEX, \
                            OPERATOR_INDEX, \
                            CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
326
327
328std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
329{
330 std::vector<unsigned int> result;
331 result.reserve(in.size());
332 for (auto & i : in)
333 {
mathad01c21025d2021-04-26 10:09:37 +0100334 // If the location of the input data is -1 then the input should be ignored.
335 if (i == -1)
336 {
337 continue;
338 }
telsoa01c577f2c2018-08-31 09:22:23 +0100339 result.push_back(CHECKED_NON_NEGATIVE(i));
340 }
341 return result;
342}
343
344void CalcPadding(uint32_t inputSize,
345 uint32_t filterSize,
346 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100347 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100348 uint32_t& paddingFront,
349 uint32_t& paddingBack,
350 tflite::Padding padding)
351{
352 paddingFront = 0;
353 paddingBack = 0;
354 if (padding == tflite::Padding_SAME)
355 {
356 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100357 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
358 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100359 if (temp > inputSize)
360 {
361 paddingFront = (temp - inputSize) / 2;
362 paddingBack = (temp - inputSize) - paddingFront;
363 }
364 }
365}
366
Kevin May7d96b162021-02-03 17:38:41 +0000367armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Finn Williamsb49ed182021-06-29 15:50:08 +0100368 const std::vector<unsigned int>& shape,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100369 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100370{
371 armnn::DataType type;
372 CHECK_TENSOR_PTR(tensorPtr);
373
374 switch (tensorPtr->type)
375 {
376 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000377 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100378 break;
379 case tflite::TensorType_FLOAT32:
380 type = armnn::DataType::Float32;
381 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000382 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000383 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000384 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000385 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000386 type = armnn::DataType::QAsymmS8;
387 }
388 else
389 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000390 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000391 type = armnn::DataType::QSymmS8;
392 }
Finn Williamsed66d142019-12-06 09:55:55 +0000393 break;
394 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000395 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000396 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100397 case tflite::TensorType_INT32:
398 type = armnn::DataType::Signed32;
399 break;
Inki Daed4619e22020-09-10 15:33:54 +0900400 case tflite::TensorType_INT64:
401 type = armnn::DataType::Signed64;
402 break;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100403 case tflite::TensorType_BOOL:
404 type = armnn::DataType::Boolean;
405 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100406 default:
407 {
408 CheckLocation location = CHECK_LOCATION();
409 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100410 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
411 tensorPtr->type,
412 tflite::EnumNameTensorType(tensorPtr->type),
413 tensorPtr->name,
414 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100415 }
416 }
Finn Williamsb49ed182021-06-29 15:50:08 +0100417 TensorShape tensorShape;
418
419 std::vector<unsigned int> safeShape = shape;
420 if (shape.size() == 0)
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100421 {
422 safeShape.push_back(1);
Finn Williamsb49ed182021-06-29 15:50:08 +0100423 }
424
425 if (!outputTensor)
426 {
427 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()), safeShape.data());
428 }
429 else
430 {
Rob Hughesd812a312021-08-06 13:10:53 +0100431 size_t shapeSignatureSize = tensorPtr->shape_signature.size();
Finn Williamsb49ed182021-06-29 15:50:08 +0100432
433 // If a shape signature exists we will use that to infer dynamic tensors
434 if (shapeSignatureSize != 0)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100435 {
Finn Williamsb49ed182021-06-29 15:50:08 +0100436 // If the shape is incompatible with the shape signature override the shape
437 if (shapeSignatureSize != shape.size())
438 {
439 safeShape = {};
440
441 for (unsigned int i = 0; i < shapeSignatureSize; ++i)
442 {
443 unsigned int dim = tensorPtr->shape_signature[i] > -1 ?
444 static_cast<unsigned int>(tensorPtr->shape_signature[i]) : 0;
445 safeShape.push_back(dim);
446 }
447 }
448
Rob Hughesd812a312021-08-06 13:10:53 +0100449 std::unique_ptr<bool[]> dimMask = std::make_unique<bool[]>(tensorPtr->shape_signature.size());
Finn Williamsb49ed182021-06-29 15:50:08 +0100450 for (unsigned int i = 0; i < tensorPtr->shape_signature.size(); ++i)
451 {
452 dimMask[i] = tensorPtr->shape_signature[i] == -1 ? false : true;
453 }
Rob Hughesd812a312021-08-06 13:10:53 +0100454 tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask.get());
Finn Williamsb49ed182021-06-29 15:50:08 +0100455 }
456 // If there is no shape signature treat the tensor as dynamic if the shape has a size of zero
457 else if (shape.size() == 0)
458 {
459 tensorShape = TensorShape(1, false);
460 }
461 else
462 {
463 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(shape.size()), shape.data());
Sadik Armagand109a4d2020-07-28 10:42:13 +0100464 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100465 }
466
Keith Davisd305e1a2020-01-22 11:57:54 +0000467 float quantizationScale = 0.0f;
468 int32_t quantizationOffset = 0;
469
470 if (tensorPtr->quantization.get())
471 {
472 if (tensorPtr->quantization->scale.size() <= 1)
473 {
474 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
475 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
476
477 if (tensorPtr->quantization->scale.size() == 1)
478 {
479 quantizationScale = tensorPtr->quantization->scale[0];
480 }
481 if (tensorPtr->quantization->zero_point.size() == 1)
482 {
483 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000484 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100485 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000486 }
487
Sadik Armagand109a4d2020-07-28 10:42:13 +0100488 armnn::TensorInfo result(tensorShape,
489 type,
490 quantizationScale,
491 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000492 return result;
493 }
494 else
495 {
496 std::vector<float> quantizationScales;
497 std::vector<int32_t> quantizationOffsets;
498
499 // Scale
500 std::copy(tensorPtr->quantization->scale.begin(),
501 tensorPtr->quantization->scale.end(),
502 std::back_inserter(quantizationScales));
503
Keith Davis0c2eeac2020-02-11 16:51:50 +0000504 // QSymmS8 Per-axis
Sadik Armagand109a4d2020-07-28 10:42:13 +0100505 armnn::TensorInfo result(tensorShape,
506 type,
507 quantizationScales,
Jan Eilers7612bd62021-04-06 17:29:03 +0100508 armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
Keith Davisd305e1a2020-01-22 11:57:54 +0000509 return result;
510 }
511 }
512 else
513 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100514 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000515 type,
516 quantizationScale,
517 quantizationOffset);
518 return result;
519 }
telsoa01c577f2c2018-08-31 09:22:23 +0100520}
521
Jan Eilers7612bd62021-04-06 17:29:03 +0100522armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr)
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000523{
524 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100525 return ToTensorInfo(tensorPtr, dimensions);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000526}
527
Kevin May7d96b162021-02-03 17:38:41 +0000528armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100529 const bool outputTensor)
530{
531 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100532 return ToTensorInfo(tensorPtr, dimensions, outputTensor);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100533}
534
telsoa01c577f2c2018-08-31 09:22:23 +0100535template<typename T>
536std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
Kevin May7d96b162021-02-03 17:38:41 +0000537CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
538 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000539 armnn::TensorInfo& tensorInfo,
540 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100541{
Jan Eilers8eb25602020-03-09 12:13:48 +0000542 IgnoreUnused(tensorPtr);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100543 ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
544 ARMNN_ASSERT_MSG(bufferPtr != nullptr,
James Ward58dec6b2020-09-11 17:32:44 +0100545 fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
telsoa01c577f2c2018-08-31 09:22:23 +0100546
547 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000548
549 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
550 {
551 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000552 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
553 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000554 }
555 else
556 {
557 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
558 }
559
Matthew Sloyan81beae32021-07-13 19:46:11 +0100560 // Make sure isConstant flag is set.
561 tensorInfo.SetConstant();
562
telsoa01c577f2c2018-08-31 09:22:23 +0100563 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
564}
565
telsoa01c577f2c2018-08-31 09:22:23 +0100566armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
567{
568 // generate the binding id by shifting the tensor id by 8 bit
569 // and add the subgraph id, which allows 256 subgraphs
570 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
571}
572
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000573bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
574{
575 const unsigned int actualSize = actual.GetNumDimensions();
576 if (actualSize != expected.size())
577 {
578 return false;
579 }
580
581 for (unsigned int i = 0u; i < actualSize; i++)
582 {
583 if (expected[i] < 0 ||
584 actual[i] != static_cast<unsigned int>(expected[i]))
585 {
586 return false;
587 }
588 }
589
590 return true;
591}
592
James Conroy05102392020-06-24 15:39:55 +0100593void CheckMatchingQuantization(const TensorInfo& first,
594 const TensorInfo& second,
595 const std::string& descName,
596 std::string const& firstName,
597 std::string const& secondName)
598{
599 if (!first.IsQuantized() ||
600 !second.IsQuantized())
601 {
602 // Not a quantized type, ignore the validation
603 return;
604 }
605
606 DataType firstDataType = first.GetDataType();
607 DataType secondDataType = second.GetDataType();
608
609 if (firstDataType != secondDataType)
610 {
611 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
612 " must be of the same quantized type, " +
613 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
614 secondName + " is " + GetDataTypeName(secondDataType));
615 }
616
617 if (!first.IsTypeSpaceMatch(second))
618 {
619 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
620 " must have the same quantization space, " +
621 firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
622 " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
623 secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
624 " and scale " + std::to_string(second.GetQuantizationScale()));
625 }
626}
627
telsoa01c577f2c2018-08-31 09:22:23 +0100628} // <anonymous>
629
// Constructs the parser and builds its dispatch table: one slot per TFLite
// builtin operator code, pre-filled with ParseUnsupportedOperator and then
// overwritten for every operator the parser explicitly supports.
TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ABS]                     = &TfLiteParserImpl::ParseAbs;
    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParserImpl::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN]                 = &TfLiteParserImpl::ParseArgMin;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX]                 = &TfLiteParserImpl::ParseArgMax;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParserImpl::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParserImpl::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CAST]                    = &TfLiteParserImpl::ParseCast;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParserImpl::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParserImpl::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParserImpl::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE]          = &TfLiteParserImpl::ParseDepthToSpace;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParserImpl::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParserImpl::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParserImpl::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ELU]                     = &TfLiteParserImpl::ParseElu;
    m_ParserFunctions[tflite::BuiltinOperator_EQUAL]                   = &TfLiteParserImpl::ParseEqual;
    m_ParserFunctions[tflite::BuiltinOperator_EXP]                     = &TfLiteParserImpl::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS]             = &TfLiteParserImpl::ParseExpandDims;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParserImpl::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER]                  = &TfLiteParserImpl::ParseGather;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER]                 = &TfLiteParserImpl::ParseGreater;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL]           = &TfLiteParserImpl::ParseGreaterOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH]              = &TfLiteParserImpl::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParserImpl::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LESS]                    = &TfLiteParserImpl::ParseLess;
    m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL]              = &TfLiteParserImpl::ParseLessOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
            = &TfLiteParserImpl::ParseLocalResponseNormalization;
    m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT]             = &TfLiteParserImpl::ParseLogicalNot;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParserImpl::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParserImpl::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParserImpl::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParserImpl::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParserImpl::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParserImpl::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParserImpl::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParserImpl::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL]               = &TfLiteParserImpl::ParseNotEqual;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParserImpl::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_PRELU]                   = &TfLiteParserImpl::ParsePrelu;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParserImpl::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParserImpl::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParserImpl::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX]              = &TfLiteParserImpl::ParseReduceMax;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN]              = &TfLiteParserImpl::ParseReduceMin;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD]             = &TfLiteParserImpl::ParseReduceProd;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParserImpl::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParserImpl::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_RSQRT]                   = &TfLiteParserImpl::ParseRsqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SHAPE]                   = &TfLiteParserImpl::ParseShape;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParserImpl::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParserImpl::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParserImpl::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParserImpl::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V]                 = &TfLiteParserImpl::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParserImpl::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParserImpl::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParserImpl::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_SUM]                     = &TfLiteParserImpl::ParseSum;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParserImpl::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParserImpl::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParserImpl::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParserImpl::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"]      = &TfLiteParserImpl::ParseDetectionPostProcess;
}
705
Kevin May7d96b162021-02-03 17:38:41 +0000706void TfLiteParserImpl::ResetParser()
telsoa01c577f2c2018-08-31 09:22:23 +0100707{
708 m_Network = armnn::INetworkPtr(nullptr, nullptr);
709 m_Model = nullptr;
710 m_SubgraphConnections.clear();
711}
712
Kevin May7d96b162021-02-03 17:38:41 +0000713INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
telsoa01c577f2c2018-08-31 09:22:23 +0100714{
715 ResetParser();
716 m_Model = LoadModelFromFile(graphFile);
717 return CreateNetworkFromModel();
718}
719
Kevin May7d96b162021-02-03 17:38:41 +0000720INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
telsoa01c577f2c2018-08-31 09:22:23 +0100721{
722 ResetParser();
723 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
724 return CreateNetworkFromModel();
725}
726
Finn Williamsb49ed182021-06-29 15:50:08 +0100727
728armnn::INetworkPtr TfLiteParserImpl::LoadModel(std::unique_ptr<tflite::ModelT> model)
729{
730 ResetParser();
731 m_Model = std::move(model);
732
733 return CreateNetworkFromModel();
734}
735
/// Translates the loaded tflite::ModelT (m_Model) into an armnn::INetwork.
/// Expects ResetParser() plus one of the model-loading entry points to have
/// run first. Throws ParseException for multi-subgraph models, out-of-range
/// opcodes, or any per-operator parse failure (re-thrown with context).
/// @return ownership of the built network (m_Network is moved out).
INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
{

    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
    // Forward the parser's InferAndValidate option to the network as the
    // "ShapeInferenceMethod" backend option.
    if (m_Options && m_Options.value().m_InferAndValidate)
    {
        BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                  {
                                                      { "InferAndValidate", true }
                                                  });

        networkOptions.push_back(shapeInferenceMethodOption);
    }

    m_Network = INetwork::Create(networkOptions);
    ARMNN_ASSERT(m_Model.get() != nullptr);

    // Only single-subgraph models are supported by this parser.
    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
                        m_Model->subgraphs.size(),
                        CHECK_LOCATION().AsString()));
    }

    // Tracked outside the try block so the catch handler can report which
    // operator/subgraph was being parsed when the failure happened.
    size_t subgraphIndex = 0;
    size_t operatorIndex = 0;
    try
    {
        for (SubgraphPtr const& subgraph : m_Model->subgraphs)
        {
            // One connection-table entry per tensor; parser functions fill in
            // producer/consumer slots as layers are created.
            m_SubgraphConnections.emplace_back(subgraph->tensors.size());
            for (OperatorPtr const& op : subgraph->operators)
            {
                auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];

// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
                auto builtinCode = std::max(opCodePtr->builtin_code,
                        static_cast<tflite::BuiltinOperator>(opCodePtr->deprecated_builtin_code));
#else
                auto builtinCode = opCodePtr->builtin_code;
#endif

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
                                                     "subgraph:{} operator idx:{}. {}",
                                                     builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
                                                     operatorIndex, CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto& parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
                ++operatorIndex;
            }

            // Wire up graph inputs/outputs and materialize constant tensors
            // after all operators of the subgraph have been parsed.
            SetupInputLayers(subgraphIndex);
            SetupOutputLayers(subgraphIndex);
            SetupConstantLayers(subgraphIndex);

            ++subgraphIndex;
            operatorIndex = 0;
        }
    }
    catch (const ParseException& e)
    {
        // Log and re-throw with the failing operator/subgraph indices attached.
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // A tensor with no registered producer (e.g. a graph input handled
            // elsewhere) is simply skipped here.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}
834
Kevin May7d96b162021-02-03 17:38:41 +0000835void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
836 size_t tensorIndex,
837 armnn::IOutputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +0100838{
839 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100840 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
841 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100842
843 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
844
845 // assuming there is only one producer for that tensor
846 if (tensorSlots.outputSlot != nullptr)
847 {
James Ward58dec6b2020-09-11 17:32:44 +0100848 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
849 "subgraph:{} tensor:{} {}",
850 subgraphIndex,
851 tensorIndex,
852 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100853 }
854
855 tensorSlots.outputSlot = slot;
856}
857
Kevin May7d96b162021-02-03 17:38:41 +0000858void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
859 size_t tensorIndex,
860 armnn::IInputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +0100861{
862 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100863 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
864 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100865
Finn Williamsd4fa5452021-03-01 12:31:41 +0000866 TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +0100867 tensorSlots.inputSlots.push_back(slot);
868}
869
Kevin May7d96b162021-02-03 17:38:41 +0000870void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100871{
872 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
873
874 // NOTE: By default we presume the custom operator is not supported
Kevin May7d96b162021-02-03 17:38:41 +0000875 auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100876
877 // Identify custom code defined for custom operator
878 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
879 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
880
881 // Find parser function that correspondes to custom code (if any)
882 auto iterator = m_CustomParserFunctions.find(customCode);
883 if (iterator != m_CustomParserFunctions.end())
884 {
885 customParserFunction = iterator->second;
886 }
887
888 // Run parser function
889 (this->*customParserFunction)(subgraphIndex, operatorIndex);
890}
891
Kevin May7d96b162021-02-03 17:38:41 +0000892void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +0100893{
894 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100895
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100896 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
897
898 auto opcodeIndex = operatorPtr->opcode_index;
Jim Flynnfca233e2021-09-23 12:16:53 +0100899
900// work around the introduction of the deprecated_builtin_code introduced in 2.4 in a backwards compatible manner
901#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
902 auto opcode = std::max(m_Model->operator_codes[opcodeIndex]->builtin_code,
903 static_cast<tflite::BuiltinOperator>(m_Model->operator_codes[opcodeIndex]->deprecated_builtin_code));
904#else
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100905 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
Jim Flynnfca233e2021-09-23 12:16:53 +0100906#endif
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100907
908 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
909 {
910 // Do not add StandInLayer, throw ParseException instead
911 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100912 fmt::format("Operator not supported. "
913 "subgraph:{} operator:{} "
914 "opcode_index:{} opcode:{} / {} {}",
915 subgraphIndex,
916 operatorIndex,
917 opcodeIndex,
918 opcode,
919 tflite::EnumNameBuiltinOperator(opcode),
920 CHECK_LOCATION().AsString()));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100921 }
922
923 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
924 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
925
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100926 const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
927 const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100928
929 StandInDescriptor descriptor(numInputs, numOutputs);
James Ward58dec6b2020-09-11 17:32:44 +0100930 auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100931
932 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
933 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +0100934 ARMNN_ASSERT(layer != nullptr);
935
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100936 for (unsigned int i = 0u; i < numOutputs; ++i)
937 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100938 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100939 }
940
941 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
942 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
943
944 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
945 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +0100946}
947
mathad01b392e982021-04-07 12:07:30 +0100948void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
949{
950 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
951
952 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
953 CHECK_VALID_SIZE(inputs.size(), 1);
954 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
955 CHECK_VALID_SIZE(outputs.size(), 1);
956
957 auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
958
959 IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
960 ARMNN_ASSERT(layer != nullptr);
961
962 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
963 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
964
965 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
966 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
967
968 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
969 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
970}
971
/// Parses a TfLite CONV_2D operator into an armnn Convolution2d layer
/// (NHWC layout), with an optional bias and an optional fused activation.
/// Throws via the CHECK_* macros on malformed operators.
void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    // Rejects fused activations this parser cannot append as a layer.
    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;                  // enabled below if a bias input exists
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // 2 inputs = data + weights, 3 inputs = data + weights + bias.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Convert the TfLite padding enum (SAME/VALID) into explicit pad values,
    // taking stride and dilation into account.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // Weights are taken as-is; no layout permutation is applied here.
    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData,
                                                 Optional<ConstTensor>(biasTensorAndData),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Append the fused activation (if any); outputs are then registered
    // against the activation layer, not the convolution itself.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1050
/// Parses a TfLite DEPTHWISE_CONV_2D operator into an armnn
/// DepthwiseConvolution2d layer (NHWC layout), with an optional bias and an
/// optional fused activation. Throws via the CHECK_* macros on malformed
/// operators.
void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;                  // enabled below if a bias input exists
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // depth_multiplier is only validated here, not stored in the descriptor;
    // its effect is carried by the filter tensor shape.
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    // 2 inputs = data + weights, 3 inputs = data + weights + bias.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Convert the TfLite padding enum (SAME/VALID) into explicit pad values,
    // taking stride and dilation into account.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // ArmNN uses the same filter tensor layout at TfLite [1, H, W, O] no need for any permutation
    auto filterTensor = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensor,
                                                          Optional<ConstTensor>(biasTensorAndData),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensor,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Append the fused activation (if any); outputs are then registered
    // against the activation layer, not the convolution itself.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1127
Kevin May7d96b162021-02-03 17:38:41 +00001128void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsed66d142019-12-06 09:55:55 +00001129{
1130 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1131
1132 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1133 CHECK_VALID_SIZE(inputs.size(), 1);
1134
1135 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1136 CHECK_VALID_SIZE(outputs.size(), 1);
1137
James Ward58dec6b2020-09-11 17:32:44 +01001138 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +00001139
1140 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001141 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +00001142
Sadik Armagand109a4d2020-07-28 10:42:13 +01001143 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Finn Williamsed66d142019-12-06 09:55:55 +00001144 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1145
1146 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1147 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1148
1149 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1150 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1151}
1152
Teresa Charlin3ab85482021-06-08 16:59:29 +01001153void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorIndex)
1154{
1155 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1156
1157 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1158 CHECK_VALID_SIZE(inputs.size(), 2);
1159
1160 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1161 CHECK_VALID_SIZE(outputs.size(), 1);
1162
1163 auto layerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
1164
1165 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1166 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1167
1168 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1169
1170 ReshapeDescriptor reshapeDesc;
Finn Williamsb49ed182021-06-29 15:50:08 +01001171
1172 if (outputTensorInfo.GetShape().AreAllDimensionsSpecified())
1173 {
1174 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1175 }
1176 else
1177 {
1178 int32_t axis = inputs[1]->shape[0];
1179
1180 int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
1181
1182 if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
1183 {
1184 throw ParseException("axis must be in range [0 - (inputDimSize + 1), inputDimSize] inclusive");
1185 }
1186
1187 if(axis < 0)
1188 {
1189 axis = inputDimSize + axis + 1;
1190 }
1191
Rob Hughesd812a312021-08-06 13:10:53 +01001192 std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
Finn Williamsb49ed182021-06-29 15:50:08 +01001193 unsigned int inputShapeIndex = 0;
1194 for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
1195 {
1196 if (i == static_cast<unsigned int>(axis))
1197 {
1198 shape[i] = 1;
1199 }
1200 else
1201 {
1202 shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
1203 ++inputShapeIndex;
1204 }
1205 }
1206
Rob Hughesd812a312021-08-06 13:10:53 +01001207 reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data());
Finn Williamsb49ed182021-06-29 15:50:08 +01001208 }
Teresa Charlin3ab85482021-06-08 16:59:29 +01001209
1210 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1211 ARMNN_ASSERT(layer != nullptr);
1212 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1213
1214 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1215 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1216
1217 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1218 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1219}
1220
Kevin May7d96b162021-02-03 17:38:41 +00001221void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
Keith Davis4cd29a02019-09-09 14:49:20 +01001222{
1223 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1224
1225 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001226 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001227
1228 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1229 CHECK_VALID_SIZE(outputs.size(), 1);
1230
James Ward58dec6b2020-09-11 17:32:44 +01001231 auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly08759e22020-03-02 11:41:31 +00001232 TransposeDescriptor desc;
Keith Davis4cd29a02019-09-09 14:49:20 +01001233
josh minorba424d22019-11-13 10:55:17 -06001234 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001235 {
1236 armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
1237 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001238 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1239 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001240 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
Mike Kelly08759e22020-03-02 11:41:31 +00001241 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
Kevin May85d92602019-09-27 17:21:06 +01001242
Mike Kelly08759e22020-03-02 11:41:31 +00001243 desc = TransposeDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001244 }
1245
James Conroy05102392020-06-24 15:39:55 +01001246 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001247 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001248 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Keith Davis4cd29a02019-09-09 14:49:20 +01001249
James Conroy05102392020-06-24 15:39:55 +01001250 IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001251 ARMNN_ASSERT(layer != nullptr);
Keith Davis4cd29a02019-09-09 14:49:20 +01001252 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1253
1254 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1255 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1256
1257 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1258 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1259}
1260
Kevin May7d96b162021-02-03 17:38:41 +00001261void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001262{
1263 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1264
1265 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1266 const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();
1267
1268 TransposeConvolution2dDescriptor desc;
1269 desc.m_BiasEnabled = false;
1270 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1271 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1272 desc.m_DataLayout = armnn::DataLayout::NHWC;
1273
1274 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
David Monahan61683802021-01-12 09:11:07 +00001275 if (inputs.size() == 4)
1276 {
1277 desc.m_BiasEnabled = true;
1278 }
1279 else
1280 {
1281 CHECK_VALID_SIZE(inputs.size(), 3);
1282 }
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001283
1284 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1285 CHECK_VALID_SIZE(outputs.size(), 1);
1286
Colm Donelan0ad3ef12020-07-03 15:54:28 +01001287 if (inputs[0])
1288 {
1289 armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
1290 std::vector<int> output_shape(tensorInfo.GetNumElements());
1291 if (tensorInfo.GetDataType() == DataType::Signed32)
1292 {
1293 ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
1294 }
1295 if (tensorInfo.GetDataType() == DataType::QAsymmU8)
1296 {
1297 for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
1298 {
1299 output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
1300 }
1301 }
1302 // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
1303 for (int dimension : output_shape)
1304 {
1305 desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
1306 }
1307 desc.m_OutputShapeEnabled = true;
1308 }
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001309 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001310 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1311
1312 // TfLite uses NHWC tensors
1313 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1314 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1315
1316 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1317 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1318
1319 CalcPadding(inputHeight,
1320 filterHeight,
1321 desc.m_StrideY,
1322 1, // DilationY
1323 desc.m_PadTop,
1324 desc.m_PadBottom,
1325 options->padding);
1326
1327 CalcPadding(inputWidth,
1328 filterWidth,
1329 desc.m_StrideX,
1330 1, // DilationX
1331 desc.m_PadLeft,
1332 desc.m_PadRight,
1333 options->padding);
1334
Finn Williamsd4fa5452021-03-01 12:31:41 +00001335 auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001336
1337 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01001338 auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001339
David Monahan61683802021-01-12 09:11:07 +00001340 if (desc.m_BiasEnabled)
1341 {
1342 auto biasTensorInfo = ToTensorInfo(inputs[3]);
Finn Williamsd4fa5452021-03-01 12:31:41 +00001343 auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo);
David Monahan61683802021-01-12 09:11:07 +00001344 layer = m_Network->AddTransposeConvolution2dLayer(desc,
Finn Williamsd4fa5452021-03-01 12:31:41 +00001345 filterTensorAndData,
1346 biasConstTensor,
David Monahan61683802021-01-12 09:11:07 +00001347 layerName.c_str());
1348 }
1349 else
1350 {
1351 layer = m_Network->AddTransposeConvolution2dLayer(desc,
Finn Williamsd4fa5452021-03-01 12:31:41 +00001352 filterTensorAndData,
David Monahan61683802021-01-12 09:11:07 +00001353 EmptyOptional(),
1354 layerName.c_str());
1355 }
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001356
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001357 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001358
Sadik Armagand109a4d2020-07-28 10:42:13 +01001359 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001360 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1361
1362 // only the tensors for the inputs are relevant, exclude the const (filter) tensor
1363 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001364 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001365
1366 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1367 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1368}
1369
Kevin May7d96b162021-02-03 17:38:41 +00001370void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001371{
1372 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
1373}
1374
Kevin May7d96b162021-02-03 17:38:41 +00001375void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001376{
1377 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1378
1379 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1380 CHECK_VALID_SIZE(inputs.size(), 3);
1381
1382 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1383 CHECK_VALID_SIZE(outputs.size(), 1);
1384
1385 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1386 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1387
1388 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1389 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1390
1391 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1392 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1393
1394 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1395 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1396
1397 size_t step = 2;
1398 std::vector<std::pair<unsigned int, unsigned int>> crops;
1399 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1400 {
1401 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1402 }
1403
1404 armnn::BatchToSpaceNdDescriptor desc;
1405 desc.m_BlockShape = blockShape;
1406 desc.m_Crops = crops;
1407 desc.m_DataLayout = armnn::DataLayout::NHWC;
1408
James Ward58dec6b2020-09-11 17:32:44 +01001409 auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001410
James Conroy05102392020-06-24 15:39:55 +01001411 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001412 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001413 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1414
1415 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1416 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001417 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1418
1419 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1420 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1421
1422 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1423 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1424}
1425
Kevin May7d96b162021-02-03 17:38:41 +00001426void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001427{
1428 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1429
1430 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1431 CHECK_VALID_SIZE(inputs.size(), 1);
1432
1433 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1434 CHECK_VALID_SIZE(outputs.size(), 1);
1435
1436 L2NormalizationDescriptor desc;
1437 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001438 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001439 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1440
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001441 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001442
Sadik Armagand109a4d2020-07-28 10:42:13 +01001443 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson28c94572019-07-18 10:47:03 +01001444 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1445
1446 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1447 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1448
1449 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1450 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1451}
1452
Kevin May7d96b162021-02-03 17:38:41 +00001453void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001454{
1455 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
1456}
1457
Kevin May7d96b162021-02-03 17:38:41 +00001458void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001459{
1460 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1461
1462 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1463 CHECK_VALID_SIZE(inputs.size(), 2);
1464
1465 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1466 CHECK_VALID_SIZE(outputs.size(), 1);
1467
James Ward58dec6b2020-09-11 17:32:44 +01001468 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001469
1470 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1471 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1472 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001473
Sadik Armagand109a4d2020-07-28 10:42:13 +01001474 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001475 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1476
1477 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1478 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001479 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1480
1481 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001482 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001483
1484 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1485 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1486}
1487
Kevin May7d96b162021-02-03 17:38:41 +00001488void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001489{
1490 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1491
1492 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1493 CHECK_VALID_SIZE(inputs.size(), 2);
1494
1495 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1496 CHECK_VALID_SIZE(outputs.size(), 1);
1497
James Ward58dec6b2020-09-11 17:32:44 +01001498 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001499
1500 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1501 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1502 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001503
Sadik Armagand109a4d2020-07-28 10:42:13 +01001504 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001505 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1506
1507 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1508 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001509 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1510
1511 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001512 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001513
1514 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1515 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1516}
1517
Kevin May7d96b162021-02-03 17:38:41 +00001518void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
1519 size_t operatorIndex,
1520 PoolingAlgorithm algorithm)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001521{
1522 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1523
1524 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1525 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1526
1527 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1528
1529 std::string layerName;
1530
1531 switch (algorithm)
1532 {
1533 case PoolingAlgorithm::Average:
1534 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001535 fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001536 break;
1537 case PoolingAlgorithm::Max:
1538 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001539 fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001540 break;
1541 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001542 ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001543 }
1544
1545 Pooling2dDescriptor desc;
1546
1547 desc.m_PoolType = algorithm;
1548 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1549 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1550 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1551 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1552 desc.m_PaddingMethod = PaddingMethod::Exclude;
1553 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001554 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001555
1556 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1557 CHECK_VALID_SIZE(inputs.size(), 1);
1558 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1559
1560 // assuming input is NHWC
1561 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1562 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1563
Pablo Tellof0bd6832019-04-26 17:58:13 +01001564 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1565 desc.m_PadTop, desc.m_PadBottom, options->padding);
1566 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1567 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001568
1569 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1570 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001571
Sadik Armagand109a4d2020-07-28 10:42:13 +01001572 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001573 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1574
1575 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1576 ARMNN_ASSERT(layer != nullptr);
jimfly01c25411c2018-11-14 17:47:22 +00001577 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001578
1579 // register the input connection slots for the layer, connections are made after all layers have been created
1580 // only the tensors for the inputs are relevant, exclude the const tensors
1581 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001582 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001583
jimfly01c25411c2018-11-14 17:47:22 +00001584 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001585 // register the output connection slots for the layer, connections are made after all layers have been created
1586 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1587 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1588}
1589
Kevin May7d96b162021-02-03 17:38:41 +00001590void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
josh minorba424d22019-11-13 10:55:17 -06001591{
1592 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1593
1594 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1595 CHECK_VALID_SIZE(inputs.size(), 3);
1596 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1597 CHECK_VALID_SIZE(outputs.size(), 1);
1598
1599 SliceDescriptor desc;
1600
1601 // set begin tensor info for slice descriptor
1602 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1603 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1604
1605 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1606 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1607
1608 // set size tensor info for slice descriptor
1609 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1610 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1611
Mike Kelly7ba84d62021-09-10 15:27:19 +01001612 std::vector<int> signedSize(sizeTensorInfo.GetNumElements());
1613 ::memcpy(signedSize.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
josh minorba424d22019-11-13 10:55:17 -06001614 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
Mike Kelly7ba84d62021-09-10 15:27:19 +01001615 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1616
1617 for (unsigned int i = 0; i < signedSize.size(); ++i)
1618 {
1619 int signedValue = signedSize[i];
Jim Flynnfca233e2021-09-23 12:16:53 +01001620
Mike Kelly7ba84d62021-09-10 15:27:19 +01001621 if (signedValue < -1 || signedValue > static_cast<int>(inputTensorInfo.GetShape()[i] - begin[i]))
1622 {
1623 throw ParseException(fmt::format("Invalid value for size {} size must be in range "
1624 "[-1, inputDimSize - begin] [-1, {}] inclusive {}",
1625 signedValue,
1626 inputTensorInfo.GetShape()[i] - begin[i],
1627 CHECK_LOCATION().AsString()));
1628 }
1629
1630 if (signedValue == -1)
1631 {
1632 size[i] = inputTensorInfo.GetShape()[i] - begin[i];
1633 }
1634 else
1635 {
1636 size[i] = static_cast<unsigned int>(signedValue);
1637 }
1638 }
1639
josh minorba424d22019-11-13 10:55:17 -06001640 desc = SliceDescriptor(begin, size);
1641
James Ward58dec6b2020-09-11 17:32:44 +01001642 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06001643
Sadik Armagand109a4d2020-07-28 10:42:13 +01001644 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001645 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1646
1647 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
josh minorba424d22019-11-13 10:55:17 -06001648 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1649
1650 // register the input connection slots for the layer, connections are made after all layers have been created
1651 // only the tensors for the inputs are relevant, exclude the const tensors
1652 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1653 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1654
1655 // register the output connection slots for the layer, connections are made after all layers have been created
1656 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1657 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1658}
1659
Kevin May7d96b162021-02-03 17:38:41 +00001660void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001661{
1662 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1663 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1664 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1665
1666 SoftmaxDescriptor desc;
1667 desc.m_Beta = options->beta;
1668
1669 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1670 CHECK_VALID_SIZE(inputs.size(), 1);
1671 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1672 CHECK_VALID_SIZE(outputs.size(), 1);
1673
James Ward58dec6b2020-09-11 17:32:44 +01001674 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001675 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1676
Sadik Armagand109a4d2020-07-28 10:42:13 +01001677 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
telsoa01c577f2c2018-08-31 09:22:23 +01001678 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1679
1680 // register the input connection slots for the layer, connections are made after all layers have been created
1681 // only the tensors for the inputs are relevant, exclude the const tensors
1682 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1683 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1684
1685 // register the output connection slots for the layer, connections are made after all layers have been created
1686 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1687 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1688}
1689
Kevin May7d96b162021-02-03 17:38:41 +00001690void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001691{
1692 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1693
1694 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1695 CHECK_VALID_SIZE(inputs.size(), 3);
1696
1697 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1698 CHECK_VALID_SIZE(outputs.size(), 1);
1699
1700 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1701 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1702
1703 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1704 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1705
1706 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1707 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1708
1709 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1710 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1711
1712 size_t step = 2;
1713 std::vector<std::pair<unsigned int, unsigned int>> padList;
1714 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1715 {
1716 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1717 }
1718
1719 armnn::SpaceToBatchNdDescriptor desc;
1720 desc.m_BlockShape = blockShape;
1721 desc.m_PadList = padList;
1722 desc.m_DataLayout = armnn::DataLayout::NHWC;
1723
James Ward58dec6b2020-09-11 17:32:44 +01001724 auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001725
James Conroy05102392020-06-24 15:39:55 +01001726 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001727 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001728 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1729
1730 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1731 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001732 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1733
1734 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1735 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1736
1737 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1738 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1739}
1740
Teresa Charlin3ab85482021-06-08 16:59:29 +01001741armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
Kevin May7d96b162021-02-03 17:38:41 +00001742 const armnn::TensorInfo & inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01001743{
Teresa Charlin3ab85482021-06-08 16:59:29 +01001744 CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
telsoa01c577f2c2018-08-31 09:22:23 +01001745 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1746
1747 if (inputTensorInfo.GetNumDimensions() > 4)
1748 {
1749 std::stringstream ss;
1750 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1751 << " shape:" << inputTensorInfo.GetShape() << " "
1752 << CHECK_LOCATION().AsString();
1753 throw ParseException(ss.str());
1754 }
1755
1756 if (squeezeDims.empty())
1757 {
1758 squeezeDims.assign(dimensionSequence,
1759 dimensionSequence+inputTensorInfo.GetNumDimensions());
1760 }
1761
1762 std::vector<uint32_t> outputDims;
1763 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1764 {
1765 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1766 auto currentDimension = inputTensorInfo.GetShape()[i];
1767 if (skipSqueeze || currentDimension != 1)
1768 {
1769 outputDims.push_back(currentDimension);
1770 }
1771 }
1772
1773 if (outputDims.size() > 4)
1774 {
1775 std::stringstream ss;
1776 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1777 << " shape:" << inputTensorInfo.GetShape() << " "
1778 << CHECK_LOCATION().AsString();
1779 throw ParseException(ss.str());
1780 }
1781
1782 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1783 outputDims.data());
1784
1785 // we need to preserve the tensor type and the quantization data as well
1786 TensorInfo outTensorInfo = inputTensorInfo;
1787 outTensorInfo.SetShape(outShape);
1788
1789 return outTensorInfo;
1790}
1791
Keith Davis0176fd82021-06-01 17:36:32 +01001792void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
1793{
1794 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1795
1796 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1797 CHECK_VALID_SIZE(inputs.size(), 1);
1798 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1799 CHECK_VALID_SIZE(outputs.size(), 1);
1800
1801 auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
1802
1803 IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
1804 ARMNN_ASSERT(layer != nullptr);
1805
1806
1807 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1808 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1809
1810 // Check if output tensor type is Signed32 or Signed64
1811 if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
1812 outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
1813 {
1814 throw ParseException(
1815 fmt::format(
1816 "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
1817 CHECK_LOCATION().AsString()));
1818 }
1819
1820 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1821 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1822
1823 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1824 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1825}
1826
Kevin May7d96b162021-02-03 17:38:41 +00001827void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001828{
1829 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1830
1831 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1832 CHECK_VALID_SIZE(inputs.size(), 1);
1833
1834 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1835 CHECK_VALID_SIZE(outputs.size(), 1);
1836
1837 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1838 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01001839 auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001840
1841 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001842
1843 std::vector<uint32_t> squeezeDim;
1844 // A single negative dim index is interpreted as a negative index in python
1845 // Meaning the index will be the shape size plus the negative index value
1846 if (options->squeeze_dims.size() == 1 && options->squeeze_dims[0] < 0)
1847 {
1848 int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
1849 squeezeDim.push_back(static_cast<uint32_t>(dim));
1850 }
1851 else
1852 {
1853 squeezeDim = AsUnsignedVector(options->squeeze_dims);
1854 }
1855
1856 armnn::TensorInfo outputTensorInfo = TfLiteParserImpl::OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);
1857
James Conroy05102392020-06-24 15:39:55 +01001858 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
telsoa01c577f2c2018-08-31 09:22:23 +01001859
1860 ReshapeDescriptor reshapeDesc;
1861 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1862
telsoa01c577f2c2018-08-31 09:22:23 +01001863 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001864 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001865 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1866
1867 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1868 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1869
1870 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1871 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1872}
1873
void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    // STRIDED_SLICE has four inputs: data, begin, end and strides.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();

    // Copy the bit-masks straight from the flatbuffer options into the descriptor.
    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // begin/end/stride are read as raw bytes from the constant buffers into
    // vectors of int. NOTE(review): this assumes each tensor's element size
    // matches sizeof(int) (i.e. INT32 data) — confirm against the TFLite schema.
    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // 'true' allows the output shape to be inferred later if unspecified.
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor becomes a network connection; begin/end/stride are consumed above.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1930
Kevin May7d96b162021-02-03 17:38:41 +00001931void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001932{
1933 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1934
1935 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1936 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1937
1938 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1939 CHECK_VALID_SIZE(inputs.size(), 2);
1940
1941 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1942 CHECK_VALID_SIZE(outputs.size(), 1);
1943
1944 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1945 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1946
James Ward58dec6b2020-09-11 17:32:44 +01001947 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001948 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001949 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001950
Sadik Armagand109a4d2020-07-28 10:42:13 +01001951 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001952 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1953
1954 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001955 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001956
1957 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1958
1959 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1960 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1961}
1962
Kevin May7d96b162021-02-03 17:38:41 +00001963void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301964{
1965 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1966
1967 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1968 const auto * options = operatorPtr->builtin_options.AsDivOptions();
1969
1970 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1971 CHECK_VALID_SIZE(inputs.size(), 2);
1972
1973 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1974 CHECK_VALID_SIZE(outputs.size(), 1);
1975
1976 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1977 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1978
James Ward58dec6b2020-09-11 17:32:44 +01001979 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301980 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001981 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301982
Sadik Armagand109a4d2020-07-28 10:42:13 +01001983 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301984 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1985
1986 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001987 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301988 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1989
1990 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1991 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1992}
1993
Kevin May7d96b162021-02-03 17:38:41 +00001994void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001995{
1996 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1997
1998 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1999 const auto * options = operatorPtr->builtin_options.AsAddOptions();
2000
2001 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2002 CHECK_VALID_SIZE(inputs.size(), 2);
2003
2004 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2005 CHECK_VALID_SIZE(outputs.size(), 1);
2006
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002007 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2008 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
2009
James Ward58dec6b2020-09-11 17:32:44 +01002010 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002011 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002012 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002013
Sadik Armagand109a4d2020-07-28 10:42:13 +01002014 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002015 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2016
2017 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002018 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02002019 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2020
2021 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2022 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2023}
2024
Kevin May7d96b162021-02-03 17:38:41 +00002025void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002026{
2027 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2028
2029 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2030 const auto * options = operatorPtr->builtin_options.AsMulOptions();
2031
2032 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2033 CHECK_VALID_SIZE(inputs.size(), 2);
2034
2035 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2036 CHECK_VALID_SIZE(outputs.size(), 1);
2037
Bruno Goncalves9c761a62018-12-27 14:20:35 -02002038 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2039 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
2040
James Ward58dec6b2020-09-11 17:32:44 +01002041 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002042 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002043 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002044
Sadik Armagand109a4d2020-07-28 10:42:13 +01002045 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002046 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2047
2048 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002049 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002050 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2051
2052 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2053 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2054}
2055
Kevin May7d96b162021-02-03 17:38:41 +00002056void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002057{
2058 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2059
2060 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2061
2062 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2063 CHECK_VALID_SIZE(outputs.size(), 1);
2064
2065 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
2066 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2067
2068 armnn::MeanDescriptor desc;
2069 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
2070 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
2071 desc.m_Axis = axis;
2072
2073 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002074 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002075
2076 desc.m_KeepDims =
2077 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
2078 true : false;
2079
James Ward58dec6b2020-09-11 17:32:44 +01002080 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002081 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002082 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002083
2084 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2085
2086 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2087 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2088
2089 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2090 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2091}
2092
Kevin May7d96b162021-02-03 17:38:41 +00002093void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002094{
2095 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2096
Kevin May7d96b162021-02-03 17:38:41 +00002097 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002098
Kevin May7d96b162021-02-03 17:38:41 +00002099 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002100 CHECK_VALID_SIZE(outputs.size(), 1);
2101
Narumol Prangnawarat8719d222020-11-27 16:57:56 +00002102 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2103
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002104 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
2105 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2106
2107 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
2108 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
2109
2110 size_t step = 2;
2111 armnn::PadDescriptor desc;
Narumol Prangnawarat8719d222020-11-27 16:57:56 +00002112 if (inputTensorInfo.IsQuantized())
2113 {
2114 desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
2115 }
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002116 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
2117 {
2118 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
2119 }
2120
James Ward58dec6b2020-09-11 17:32:44 +01002121 auto layerName = fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002122 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002123
2124 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
2125 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002126 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2127
2128 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2129 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2130
2131 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2132 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2133}
2134
void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    // PRELU has two inputs: the data tensor and the alpha (slope) tensor.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo alphaTensorInfo = ToTensorInfo(inputs[1]);
    // 'true' allows the output shape to be inferred later if unspecified.
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    if (IsConstTensor(inputs[1]))
    {
        // Constant alpha: wire the data input (slot 0) by consumer
        // registration, and materialise alpha as a dedicated ConstantLayer
        // connected directly into slot 1.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
        RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);

        auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo);
        std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
        IConnectableLayer* constLayer =
                    m_Network->AddConstantLayer(alphaTensorAndData, constLayerName.c_str());
        ARMNN_ASSERT(constLayer != nullptr);

        constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
        constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
        // VIRTUAL_OPERATOR_ID: the constant layer has no operator of its own
        // in the model; register its output against the alpha tensor index.
        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            constLayer,
                            { inputTensorIndexes[1] });
    }
    else
    {
        // Non-constant alpha: both inputs are ordinary network connections.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2184
Kevin May7d96b162021-02-03 17:38:41 +00002185void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00002186{
2187 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2188
2189 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2190 CHECK_VALID_SIZE(inputs.size(), 1);
2191
2192 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2193 CHECK_VALID_SIZE(outputs.size(), 1);
2194
James Ward58dec6b2020-09-11 17:32:44 +01002195 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002196
2197 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002198 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002199
Sadik Armagand109a4d2020-07-28 10:42:13 +01002200 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002201 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2202
2203 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2204 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2205
2206 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2207 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2208}
Finn Williamsc42c3842019-01-22 14:18:11 +00002209
// RELU delegates to the shared activation path.
void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
}
2214
// RELU6 delegates to the shared activation path; BoundedReLu's upper bound
// (6.0f) is set inside ParseActivation.
void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
}
Sadik Armagan58f39192018-09-17 14:14:39 +01002219
Kevin May7d96b162021-02-03 17:38:41 +00002220void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan12239e72020-05-27 11:06:17 +01002221{
Jan Eilers2f746b32020-07-28 14:00:06 +01002222 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
Sadik Armagan12239e72020-05-27 11:06:17 +01002223}
2224
// LOGISTIC (sigmoid) delegates to the shared activation path.
void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
}
2229
// TANH delegates to the shared activation path; its scale parameters
// (m_A = m_B = 1.0f) are set inside ParseActivation.
void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
}
2234
// ELU delegates to the shared activation path (alpha fixed to 1.0f there).
void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
}
2239
// HARD_SWISH delegates to the shared activation path.
void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
}
Finn Williamsc42c3842019-01-22 14:18:11 +00002244
// Shared handler for all single-input activation operators. Builds an
// ActivationDescriptor for the requested function, adds the layer and wires
// its single input/output.
void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    // operatorPtr is only dereferenced in the LeakyReLu case below; marked
    // unused so other instantiations do not warn.
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    IgnoreUnused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // The "Activation:" prefix is extended with a per-function suffix in the
    // switch below, e.g. "Activation:RELU:<subgraph>:<operator>".
    auto layerName = fmt::format("Activation:");
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    // Each case completes the layer name and sets the function-specific
    // descriptor parameters (m_A / m_B).
    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
            // BoundedReLu clamps to [m_B, m_A] = [0, 6], i.e. RELU6.
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::TanH:
        {
            layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
            // Unit scale tanh: m_A * tanh(m_B * x) with m_A = m_B = 1.
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        case ActivationFunction::LeakyReLu:
        {
            layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
            // Alpha (negative-slope) comes from the operator's options.
            const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
            activationDesc.m_A = options->alpha;
            break;
        }
        case ActivationFunction::Elu:
        {
            layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
            activationDesc.m_A = 1.0f;
            break;
        }
        case ActivationFunction::HardSwish:
        {
            layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
                            static_cast<int>(activationType), CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    // 'true' allows the output shape to be inferred later if unspecified.
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
Kevin May7d96b162021-02-03 17:38:41 +00002327armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
2328 const std::vector<int32_t> & targetDimsIn)
Sadikb94967b2018-09-19 15:30:00 +01002329{
2330 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2331 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2332
2333 if (stretchDim != targetDimsIn.end())
2334 {
2335 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2336 {
2337 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002338 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01002339 }
2340
2341 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002342 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01002343 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2344
2345 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2346 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2347 }
2348
2349 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2350
2351 TensorInfo reshapeInfo = inputTensorInfo;
2352 reshapeInfo.SetShape(outputShape);
2353
2354 return reshapeInfo;
2355}
2356
Kevin May7d96b162021-02-03 17:38:41 +00002357void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
Sadikb94967b2018-09-19 15:30:00 +01002358{
2359 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2360
2361 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01002362
2363 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2364 CHECK_VALID_SIZE(outputs.size(), 1);
2365
2366 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2367 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01002368 auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01002369
2370 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00002371 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
James Conroy05102392020-06-24 15:39:55 +01002372 CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");
Derek Lambertic9e52792020-03-11 11:42:26 +00002373
Jan Eilersbac9b352020-07-13 13:40:24 +01002374 // Extracting new shape for the output
2375 // There are two ways it can be passed
2376 // * First is to define the target shape in the operator built-in options
2377 // * Second is to pass it as a second input tensor
Derek Lambertic9e52792020-03-11 11:42:26 +00002378 std::vector<int32_t> targetShape;
Jan Eilersbac9b352020-07-13 13:40:24 +01002379 bool targetShapeFound = false;
2380 // Check if built-in options were given
2381 if (options != nullptr)
Derek Lambertic9e52792020-03-11 11:42:26 +00002382 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002383 // make sure the parameter is given
2384 if (options->new_shape.empty() == false)
Derek Lambertic9e52792020-03-11 11:42:26 +00002385 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002386 targetShape = options->new_shape;
2387 targetShapeFound = true;
Derek Lambertif4a953f2020-03-17 14:25:57 +00002388 }
Derek Lambertic9e52792020-03-11 11:42:26 +00002389 }
Jan Eilersbac9b352020-07-13 13:40:24 +01002390
2391 // If there is no built-in option given or if the built-in new_shape parameter was empty
2392 if (!targetShapeFound)
Derek Lambertic9e52792020-03-11 11:42:26 +00002393 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002394 // Check for a second input tensor
2395 if (inputs.size() > 1 && inputs[1] != nullptr)
2396 {
2397 if (inputs[1]->is_variable)
2398 {
2399 ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
2400 }
2401
2402 if (inputs[1]->shape.size() != 1)
2403 {
2404 ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
2405 }
2406
2407 if (inputs[1]->type != tflite::TensorType_INT32)
2408 {
2409 ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
2410 }
2411
2412 // Extract target shape from input
2413 auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2414 auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
Sadik Armagan19a1c032021-01-20 12:17:00 +00002415 if (!values)
2416 {
2417 ARMNN_THROW_PARSE_EXCEPTION("Reshape operator target shape input buffer data is null");
2418 }
Jan Eilersbac9b352020-07-13 13:40:24 +01002419 for (int i=0; i < inputs[1]->shape[0]; ++i)
2420 {
2421 targetShape.push_back(values[i]);
2422 }
2423 }
2424 else
Derek Lambertic9e52792020-03-11 11:42:26 +00002425 {
2426 ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
2427 "At least one method required");
2428 }
Derek Lambertic9e52792020-03-11 11:42:26 +00002429 }
2430
kevmay0171972a82018-12-17 14:28:03 +00002431 armnn::TensorInfo reshapeOutputTensorInfo =
Kevin May7d96b162021-02-03 17:38:41 +00002432 TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);
Sadikb94967b2018-09-19 15:30:00 +01002433
kevmay0171972a82018-12-17 14:28:03 +00002434 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00002435 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
2436 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00002437 {
2438 std::stringstream ss;
2439 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00002440 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00002441 << " does not equal output shape "
2442 << actualOutputTensorInfo.GetShape()
2443 << ": "
2444 << CHECK_LOCATION().AsString();
2445 throw ParseException(ss.str());
2446 }
2447
Sadikb94967b2018-09-19 15:30:00 +01002448 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00002449 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01002450
Sadikb94967b2018-09-19 15:30:00 +01002451 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002452 ARMNN_ASSERT(layer != nullptr);
kevmay0171972a82018-12-17 14:28:03 +00002453 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01002454
2455 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2456 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2457
2458 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2459 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2460}
2461
Kevin May7d96b162021-02-03 17:38:41 +00002462void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002463{
Sadik Armagana3b31f02019-12-05 09:08:53 +00002464 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
2465}
2466
Kevin May7d96b162021-02-03 17:38:41 +00002467void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagana3b31f02019-12-05 09:08:53 +00002468{
2469 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
2470}
2471
Kevin May7d96b162021-02-03 17:38:41 +00002472void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
Sadik Armagana3b31f02019-12-05 09:08:53 +00002473{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002474 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2475
2476 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2477 CHECK_VALID_SIZE(inputs.size(), 2);
2478
2479 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2480 CHECK_VALID_SIZE(outputs.size(), 1);
2481
2482 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
2483
2484 // Data for the parsed tensor args (size) must be stored locally.
2485 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2486
2487 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2488 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2489
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002490 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002491 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002492 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002493 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2494 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002495
James Ward58dec6b2020-09-11 17:32:44 +01002496 auto layerName = fmt::format("Resize:");
Sadik Armagana3b31f02019-12-05 09:08:53 +00002497
2498 switch (resizeMethod)
2499 {
2500 case ResizeMethod::Bilinear:
2501 {
James Ward58dec6b2020-09-11 17:32:44 +01002502 layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002503
2504 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2505 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2506
David Monahan4a0c9b92020-05-30 09:48:39 +01002507 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002508 break;
2509 }
2510 case ResizeMethod::NearestNeighbor:
2511 {
James Ward58dec6b2020-09-11 17:32:44 +01002512 layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagana3b31f02019-12-05 09:08:53 +00002513 break;
2514 }
2515 default:
2516 {
2517 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002518 fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
2519 static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
Sadik Armagana3b31f02019-12-05 09:08:53 +00002520 }
2521 }
2522
James Conroy05102392020-06-24 15:39:55 +01002523 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002524 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002525 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2526
2527 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
2528 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002529 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2530
2531 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2532 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2533
2534 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2535 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2536}
2537
Kevin May7d96b162021-02-03 17:38:41 +00002538void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan479045b2018-10-01 11:51:37 +01002539{
2540 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2541
2542 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2543 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
2544
2545 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2546
2547 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2548 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2549 CHECK_VALID_SIZE(outputs.size(), 1);
2550
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002551 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
2552 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01002553
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002554 const unsigned int concatDimInput = static_cast<unsigned int>(
2555 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01002556
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002557 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2558 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01002559
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002560 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01002561
2562 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2563 {
2564 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
2565
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002566 // This set up concatDescriptor view origin
2567 armnnUtils::ProcessConcatInputTensorInfo(
2568 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01002569 }
2570
James Ward58dec6b2020-09-11 17:32:44 +01002571 auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002572 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002573
Jim Flynn906f9462019-05-10 13:55:21 +01002574 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002575 ARMNN_ASSERT(layer != nullptr);
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002576 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01002577
James Conroy05102392020-06-24 15:39:55 +01002578 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002579 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01002580
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002581 // add fused activation layer
2582 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01002583
Sadik Armagan479045b2018-10-01 11:51:37 +01002584 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2585 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2586}
2587
Kevin May7d96b162021-02-03 17:38:41 +00002588void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002589{
2590 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2591
2592 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2593 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2594
2595 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2596
2597 FullyConnectedDescriptor desc;
2598 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002599 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002600
2601 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2602 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2603 CHECK_VALID_SIZE(outputs.size(), 1);
2604
2605 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2606
2607 // Fully Connected Layer accepts two dimensional weights input
2608 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2609 if (weightsDimension != 2)
2610 {
2611 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002612 fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
2613 "Node {}",
2614 weightsDimension,
2615 CHECK_LOCATION().AsString()));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002616 }
2617
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002618 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01002619 auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002620
Matthew Sloyan81beae32021-07-13 19:46:11 +01002621 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2622 // Add the first input tensor to the registration list
2623 std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
2624 std::vector<unsigned int> ignoreInputWhenRegister = {};
Finn Williamsd4fa5452021-03-01 12:31:41 +00002625
2626 desc.m_ConstantWeights = IsConstTensor(inputs[1]);
2627
Matthew Sloyan81beae32021-07-13 19:46:11 +01002628 // Add the weights input to the registration list, constant layers will be added by SetupConstantLayers if constant.
2629 tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002630
Finn Williamsd4fa5452021-03-01 12:31:41 +00002631 if (inputs.size() == 3)
2632 {
2633 desc.m_BiasEnabled = true;
Matthew Sloyan81beae32021-07-13 19:46:11 +01002634
2635 // Add the biases input to the registration list, constant layer will be added by SetupConstantLayers.
2636 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
Finn Williamsd4fa5452021-03-01 12:31:41 +00002637 }
2638
Matthew Sloyan81beae32021-07-13 19:46:11 +01002639 // Filters and biases are always passed to fully connected as inputs
2640 layer = m_Network->AddFullyConnectedLayer(desc, layerName.c_str());
Finn Williamsd4fa5452021-03-01 12:31:41 +00002641
2642 ARMNN_ASSERT(layer != nullptr);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002643 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2644
Finn Williamsd4fa5452021-03-01 12:31:41 +00002645 unsigned int startingSlotIndex = 0;
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002646 if (inputTensorInfo.GetNumDimensions() > 2)
2647 {
2648 // Add reshape to flatten to 2D [batch_size, input_size],
2649 // where "input_size" corresponds to the number of inputs to the layer,
2650 // matching the second dimension of weights,
2651 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2652 std::vector<unsigned int> reshapedDimensions(2);
2653 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2654 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2655
2656 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2657 {
2658 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002659 fmt::format("Failed to deduce input tensor shape from filter size {} {}",
2660 reshapedDimensions[1],
2661 CHECK_LOCATION().AsString()));
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002662 }
2663
2664 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2665 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2666
James Ward58dec6b2020-09-11 17:32:44 +01002667 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Finn Williamsd4fa5452021-03-01 12:31:41 +00002668 armnn::ReshapeDescriptor reshapeDescriptor;
2669 reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
2670 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, layerName.c_str());
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002671
2672 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2673 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2674
2675 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
Finn Williamsd4fa5452021-03-01 12:31:41 +00002676 // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
2677 tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
2678 startingSlotIndex = 1;
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002679 }
Finn Williamsd4fa5452021-03-01 12:31:41 +00002680
2681 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002682
Sadik Armagand109a4d2020-07-28 10:42:13 +01002683 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002684 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2685
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002686 // we need to add the activation layer and fortunately we don't need to care about the data layout
2687 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2688 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002689
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002690 // register the output connection slots for the layer, connections are made after all layers have been created
2691 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2692 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2693}
2694
/// Parses the TfLite custom operator TFLite_Detection_PostProcess into an ArmNN
/// DetectionPostProcess layer. Descriptor parameters come from the operator's
/// flexbuffers-encoded custom options; the anchors (input 2) are taken as a constant tensor.
void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    // Outputs: detection boxes, classes, scores, and number of detections.
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections           = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection  = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold       = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold         = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses              = m["num_classes"].AsUInt32();
    desc.m_ScaleH                  = m["h_scale"].AsFloat();
    desc.m_ScaleW                  = m["w_scale"].AsFloat();
    desc.m_ScaleX                  = m["x_scale"].AsFloat();
    desc.m_ScaleY                  = m["y_scale"].AsFloat();

    // These two keys are optional in the custom options; the descriptor keeps its
    // default values when they are absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms        = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass   = m["detections_per_class"].AsUInt32();
    }

    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Anchors are a constant input consumed directly by the layer, not registered as an input slot.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);

    auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
                                                                       layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });  // detection boxes [y1, x1, y2, x2]
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });     // detection classes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });     // detection scores
    m_OverridenOutputShapes.push_back({ 1 });                     // number of detections

    // Apply the overridden shape to each of the four outputs.
    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
2771
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002772/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00002773void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002774{
2775 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2776
2777 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2778 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2779 CHECK_VALID_SIZE(outputs.size(), 1);
2780
2781 if (inputs.size() < 1)
2782 {
2783 throw ParseException("Pack must have at least one input.");
2784 }
2785
2786 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2787 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2788
2789 StackDescriptor desc;
2790 desc.m_Axis = static_cast<uint32_t>(options->axis);
2791 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2792
2793 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2794 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2795 desc.m_InputShape = inputTensorInfo.GetShape();
2796
James Ward58dec6b2020-09-11 17:32:44 +01002797 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002798 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2799
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002800 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002801
Sadik Armagand109a4d2020-07-28 10:42:13 +01002802 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002803 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2804
2805 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2806 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2807
2808 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2809 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2810}
2811
Kevin May7d96b162021-02-03 17:38:41 +00002812void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01002813{
2814 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2815
2816 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2817 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2818
2819 // This unpackAxis indicates the axis to unpack
2820 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2821
2822 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2823 CHECK_VALID_SIZE(inputs.size(), 1);
2824
2825 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002826
2827 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2828 {
2829 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002830 fmt::format("The unpack axis: {} cannot be greater than or equal to "
2831 "the number of input dimension {} {}",
2832 unpackAxis,
2833 inputTensorInfo.GetNumDimensions(),
2834 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002835 }
2836
Nina Drozd200e3802019-04-15 09:47:39 +01002837 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2838 // If num is not defined, automatically infer from the length of the dimension axis.
2839 if(unpackNum == 0)
2840 {
2841 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2842 }
2843
2844 // If unpack number cannot be inferred and is still zero, throw ParseException.
2845 if(unpackNum == 0)
2846 {
2847 throw ParseException("Number to unpack must greater than zero.");
2848 }
2849
2850 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2851 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2852
2853 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2854 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2855
2856 // Add current input shape to unpackDimSizes
2857 for (unsigned int i = 0; i < inputDimSize; ++i)
2858 {
2859 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2860 }
2861
2862 if (unpackDimSizes[unpackAxis] != unpackNum)
2863 {
2864 throw ParseException("Number to unpack must be the same as length of the dimension to "
2865 "unpack along.");
2866 }
2867
2868 unpackDimSizes[unpackAxis] /= unpackNum;
2869
2870 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2871 for (unsigned int j = 0; j < unpackNum; ++j)
2872 {
2873 // Set the size of the views.
2874 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2875 {
2876 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2877 }
2878 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2879 }
2880
James Ward58dec6b2020-09-11 17:32:44 +01002881 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01002882 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002883 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01002884
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002885 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2886 unpackDimSizes.data());
2887
Nina Drozd200e3802019-04-15 09:47:39 +01002888 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2889 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2890
Finn Williamsb49ed182021-06-29 15:50:08 +01002891 std::vector<unsigned int> reshapeDims;
2892 for (unsigned int axis = 0; axis < splitOutShape.GetNumDimensions(); ++axis)
2893 {
2894 if (axis != unpackAxis)
2895 {
2896 reshapeDims.push_back(splitOutShape[axis]);
2897 }
2898 }
2899
2900 TensorShape reshapeOutputShape(splitOutShape.GetNumDimensions() -1, reshapeDims.data());
2901
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002902 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2903 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2904 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002905 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01002906 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002907 armnn::ReshapeDescriptor desc;
Finn Williamsb49ed182021-06-29 15:50:08 +01002908 desc.m_TargetShape = reshapeOutputShape;
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002909 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2910
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002911 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2912 outputTensorInfo.GetDataType(),
2913 outputTensorInfo.GetQuantizationScale(),
2914 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002915 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2916
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002917 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002918
2919 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2920 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2921 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2922 }
Nina Drozd200e3802019-04-15 09:47:39 +01002923}
2924
Kevin May7d96b162021-02-03 17:38:41 +00002925void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd0324f482019-04-08 10:52:10 +01002926{
2927 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2928
2929 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2930 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2931
2932 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2933
Nina Drozd200e3802019-04-15 09:47:39 +01002934 // If number of splits cannot be inferred and is zero, throw ParseException.
2935 if(numSplits == 0)
2936 {
2937 throw ParseException("Number to splits must greater than zero.");
2938 }
2939
Nina Drozd0324f482019-04-08 10:52:10 +01002940 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2941 CHECK_VALID_SIZE(inputs.size(), 2);
2942 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2943 CHECK_VALID_SIZE(outputs.size(), numSplits);
2944
Matthew Sloyaned7fce42021-04-15 20:46:24 +01002945 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2946 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
2947 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
Nina Drozd0324f482019-04-08 10:52:10 +01002948
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002949 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01002950 if (axisBufferPtr == nullptr)
2951 {
2952 throw ParseException(
2953 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
2954 CHECK_LOCATION().AsString()));
2955 }
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002956
Matthew Sloyaned7fce42021-04-15 20:46:24 +01002957 std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
2958 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2959 int32_t axis = axisData[0];
2960
2961 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
2962 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
2963 {
2964 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
2965 // E.g. Rank 4 tensor can have axis in range [-4, 3)
2966 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
2967 throw ParseException(
2968 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
2969 axis,
2970 CHECK_LOCATION().AsString()));
2971 }
2972
2973 const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
Nina Drozd0324f482019-04-08 10:52:10 +01002974
Nina Drozd0324f482019-04-08 10:52:10 +01002975 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002976 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002977 {
2978 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002979 fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
2980 inputTensorInfo.GetNumDimensions(),
2981 MaxNumOfTensorDimensions,
2982 CHECK_LOCATION().AsString()));
Nina Drozd0324f482019-04-08 10:52:10 +01002983 }
2984
2985 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2986
2987 // Add current input shape to splitterDimSizes
2988 for (unsigned int i = 0; i < inputDimSize; ++i)
2989 {
2990 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2991 }
2992
2993 if (splitterDimSizes[splitDim] % numSplits != 0)
2994 {
2995 throw ParseException("Number of splits must evenly divide the dimension");
2996 }
2997 splitterDimSizes[splitDim] /= numSplits;
2998
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002999 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01003000 for (unsigned int j = 0; j < numSplits; ++j)
3001 {
3002 // Set the size of the views.
3003 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
3004 {
3005 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
3006 }
3007 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
3008 }
3009
James Ward58dec6b2020-09-11 17:32:44 +01003010 auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd0324f482019-04-08 10:52:10 +01003011 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003012 ARMNN_ASSERT(layer != nullptr);
Nina Drozd0324f482019-04-08 10:52:10 +01003013
3014 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01003015 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01003016
Nina Drozd0324f482019-04-08 10:52:10 +01003017 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3018 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003019 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01003020 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01003021 }
3022
3023 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3024 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3025}
3026
Derek Lambertif0176992020-04-28 13:37:49 +01003027unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
3028{
3029 int numDims = armnn::numeric_cast<int>(numDimsIn);
3030 int v = idx < 0 ? numDims + idx : idx;
3031 ARMNN_ASSERT(v >= 0);
3032 ARMNN_ASSERT(v < numDims);
3033
3034 return static_cast<unsigned int>(v);
3035}
3036
Kevin May7d96b162021-02-03 17:38:41 +00003037void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01003038{
3039 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3040
3041 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Ryan OShea86704732020-05-26 11:41:04 +01003042 const auto * options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01003043
3044 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3045 CHECK_VALID_SIZE(inputs.size(), 3);
3046
3047 auto& inputTensor = inputs[0];
3048 auto& splitsTensor = inputs[1];
3049 auto& axisTensor = inputs[2];
3050
3051 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
3052 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
3053 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
3054 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
3055
3056 // Inputs
3057 auto inputDimSize = inputTensorInfo.GetNumDimensions();
3058 if (inputDimSize > MaxNumOfTensorDimensions)
3059 {
3060 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003061 fmt::format("The number of dimensions: {} for input tensors of the "
3062 "SplitV op cannot be greater than {} {}",
3063 inputTensorInfo.GetNumDimensions(),
3064 MaxNumOfTensorDimensions,
3065 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01003066 }
3067
3068 // Get split axis
3069 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003070 if (axisBufferPtr == nullptr)
3071 {
3072 throw ParseException(
3073 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
3074 CHECK_LOCATION().AsString()));
3075 }
3076
Derek Lambertif0176992020-04-28 13:37:49 +01003077 std::vector<int> axisData(axisTensorInfo.GetNumElements());
3078 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003079 int32_t axis = axisData[0];
3080
3081 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
3082 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
3083 {
3084 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
3085 // E.g. Rank 4 tensor can have axis in range [-4, 3)
3086 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
3087 throw ParseException(
3088 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
3089 axis,
3090 CHECK_LOCATION().AsString()));
3091 }
3092 const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
Derek Lambertif0176992020-04-28 13:37:49 +01003093
Derek Lambertif0176992020-04-28 13:37:49 +01003094 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01003095 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01003096 unsigned int numSplits{0};
3097
3098 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01003099 {
3100 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01003101 }
3102 else
3103 {
Ryan OShea86704732020-05-26 11:41:04 +01003104 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01003105 }
3106
3107 if (numSplits <=0)
3108 {
3109 throw ParseException("SplitV has invalid number of splits");
3110 }
3111
Jan Eilersc0761e92020-06-29 16:48:44 +01003112 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01003113 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01003114 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01003115
Jan Eilersc0761e92020-06-29 16:48:44 +01003116 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01003117 int numInferred{0};
3118 unsigned int inferIdx{0};
3119 int splitSum{0};
3120 for (auto split : splitsData)
3121 {
3122 if (split < 0)
3123 {
3124 numInferred++;
3125 inferIdx = idx;
3126 }
3127 else
3128 {
3129 splitSum += split;
3130 }
3131 idx++;
3132 }
3133 // Check for inferred Axis
3134 if (numInferred == 0)
3135 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003136 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01003137 {
3138 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
3139 }
3140 }
3141 else if (numInferred == 1)
3142 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003143 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01003144 }
3145 else
3146 {
3147 throw ParseException("Cannot infer split size for more than one split");
3148 }
3149
Derek Lambertif0176992020-04-28 13:37:49 +01003150 //Ouput size validation
3151 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3152 CHECK_VALID_SIZE(outputs.size(), numSplits);
3153
3154 // Setup Armnn descriptor
3155 SplitterDescriptor splitDesc(numSplits, inputDimSize);
3156 unsigned int accumSplit = 0;
3157 for (unsigned int j = 0; j < numSplits; ++j)
3158 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003159 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01003160
3161 // Set the size of the views.
3162 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
3163 {
3164 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
3165 if (dimIdx == splitDim)
3166 {
3167 dimSize = splitSize;
3168 }
3169 splitDesc.SetViewSize(j, dimIdx, dimSize);
3170 }
3171
3172 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
3173 accumSplit += splitSize;
3174 }
3175
James Ward58dec6b2020-09-11 17:32:44 +01003176 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01003177 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003178 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01003179
3180 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3181 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3182
3183 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3184 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003185 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01003186 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
3187 }
3188
3189 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3190 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3191}
3192
/// Parses a TfLite ARG_MIN operator by delegating to the shared ArgMin/ArgMax handler.
void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
}

/// Parses a TfLite ARG_MAX operator by delegating to the shared ArgMin/ArgMax handler.
void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
{
    ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
}
3202
/// Shared handler for ARG_MIN / ARG_MAX.
/// Reads the constant axis tensor (input 1), validates the output data type
/// and the axis range, then adds an ArgMinMax layer to the network.
void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    // The axis must be a single scalar value.
    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);

    // Check if output tensor type is Signed32 or Signed64
    if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
        outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
    {
        throw ParseException(
            fmt::format(
                "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
                CHECK_LOCATION().AsString()));
    }

    // Get const axis value from model and set it to descriptor.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    if (axisBufferPtr == nullptr)
    {
        // A null buffer means the axis is not a constant tensor, which this
        // parser does not support.
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData.front();

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    ArgMinMaxDescriptor desc;
    desc.m_Axis = axis;
    desc.m_Function = argMinMaxFunction;

    // Register a ArgMin/ArgMax layer.
    auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
    auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
    IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
3271
/// Parses a TfLite GATHER operator and adds a Gather layer.
/// Inputs: [0] = params tensor, [1] = indices tensor; the axis comes from the
/// operator's GatherOptions.
void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo indicesTensorInfo = ToTensorInfo(inputs[1]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);

    armnn::GatherDescriptor gatherDescriptor;

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsGatherOptions();
    auto axis = options->axis;

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
    auto outputDimensions = outputTensorInfo.GetNumDimensions();
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        throw ParseException(
            fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
                        axis,
                        inputDimensions, inputDimensions,
                        CHECK_LOCATION().AsString()));
    }
    // Gather's output rank must equal inputRank + indicesRank - 1.
    if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
    {
        throw ParseException(
            fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
                        outputDimensions,
                        inputDimensions, indicesDimensions,
                        CHECK_LOCATION().AsString()));
    }

    gatherDescriptor.m_Axis = axis;

    auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Both params and indices are runtime inputs to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3324
/// Parses a TfLite DEPTH_TO_SPACE operator and adds a DepthToSpace layer.
/// block_size must be >= 2, as required by the TfLite operator definition.
void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);
    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::DepthToSpaceDescriptor descriptor;

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
    auto blockSize = options->block_size;
    if (blockSize < 2)
    {
        throw ParseException(
            fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
                        blockSize,
                        CHECK_LOCATION().AsString()));
    }
    descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);

    auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    // NOTE(review): the 'true' argument presumably tolerates an unspecified
    // output shape — confirm against ToTensorInfo's definition.
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3360
/// The following thin wrappers map each TfLite reduce operator onto the shared
/// ParseReduce implementation with the matching armnn::ReduceOperation.
void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
}

void TfLiteParserImpl::ParseReduceProd(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Prod);
}

void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
}

void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
}
3380
3381void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
3382{
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003383 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3384
3385 const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3386 const auto *options = operatorPtr->builtin_options.AsReducerOptions();
3387
3388 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3389 CHECK_VALID_SIZE(inputs.size(), 2);
3390
3391 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3392 CHECK_VALID_SIZE(outputs.size(), 1);
3393
Sadik Armagana2747482021-02-09 10:28:54 +00003394 auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003395
3396 armnn::TensorInfo inputTensorInfo0 = ToTensorInfo(inputs[0]);
3397 armnn::TensorInfo inputTensorInfo1 = ToTensorInfo(inputs[1]);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003398
3399 ReduceDescriptor desc;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003400 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3401 // Get const axis value from model and set it to descriptor.
3402 if (axisBufferPtr != nullptr)
3403 {
Sadik Armagan49bdb792021-02-11 13:57:07 +00003404 std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
3405 ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());
3406
3407 // Convert the axis to unsigned int and remove duplicates.
3408 auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
3409 std::set<unsigned int> uniqueAxis;
3410 std::transform(axisData.begin(),
3411 axisData.end(),
3412 std::inserter(uniqueAxis, uniqueAxis.begin()),
3413 [rank](int i)->unsigned int{
3414 return static_cast<uint32_t>(((i + rank) % rank)); });
3415 desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003416 }
Sadik Armagana2747482021-02-09 10:28:54 +00003417 else
3418 {
3419 for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
3420 {
3421 desc.m_vAxis.push_back(i);
3422 }
3423 }
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003424
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003425 desc.m_KeepDims = options->keep_dims;
Sadik Armagana2747482021-02-09 10:28:54 +00003426 desc.m_ReduceOperation = reduceOperation;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003427
3428 // Register a new layer object, Sum.
3429 IConnectableLayer *layer = m_Network->AddReduceLayer(desc, layerName.c_str());
3430
3431 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3432 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3433
3434 // Register input tensor to the layer.
3435 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3436 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3437
3438 // Register output tensor to the layer.
3439 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3440 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3441}
3442
/// Parses a TfLite ABS operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
}

/// Parses a TfLite EXP operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
}
3452
Mike Kelly31dce2b2021-09-01 21:22:37 +01003453void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, size_t operatorIndex)
3454{
3455 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3456
3457 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3458 CHECK_VALID_SIZE(inputs.size(), 1);
3459
3460 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3461 CHECK_VALID_SIZE(outputs.size(), 1);
3462
3463 auto layerName = fmt::format("LRN:{}:{}", subgraphIndex, operatorIndex);
3464 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
3465
3466 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3467
3468 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3469 const auto* options = operatorPtr->builtin_options.AsLocalResponseNormalizationOptions();
3470
3471 armnn::NormalizationDescriptor descriptor;
3472 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3473 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3474 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3475 descriptor.m_NormSize = static_cast<uint32_t>(options->radius);
3476 descriptor.m_K = options->bias;
3477 descriptor.m_Alpha = options->alpha;
3478 descriptor.m_Beta = options->beta;
3479
3480 // ArmNN expects normSize to be the full size of the normalization
3481 // window rather than the radius as in TfLite.
3482 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3483
3484 IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
3485 ARMNN_ASSERT(layer != nullptr);
3486
3487 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3488 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3489
3490 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3491 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3492
3493 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3494 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3495}
3496
/// Parses a TfLite LOGICAL_NOT operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
}

/// Parses a TfLite NEG operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
}

/// Parses a TfLite RSQRT operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
}
3511
/// Shared handler for one-input/one-output elementwise unary operators
/// (Abs, Exp, LogicalNot, Neg, Rsqrt, ...). Adds an ElementwiseUnary layer
/// named "<Op>:<subgraph>:<operator>".
void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Build the layer name from the operation name plus format placeholders,
    // then substitute the subgraph/operator indices.
    std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
    std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);

    ElementwiseUnaryDescriptor desc;
    desc.m_Operation = unaryOperation;
    IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
3539
/// The following thin wrappers map each TfLite comparison operator onto the
/// shared ParseComparison implementation with the matching armnn operation.
void TfLiteParserImpl::ParseEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Equal);
}

void TfLiteParserImpl::ParseNotEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::NotEqual);
}

void TfLiteParserImpl::ParseGreater(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Greater);
}

void TfLiteParserImpl::ParseGreaterOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::GreaterOrEqual);
}

void TfLiteParserImpl::ParseLess(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Less);
}

void TfLiteParserImpl::ParseLessOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::LessOrEqual);
}
3569
/// Shared handler for two-input comparison operators (Equal, NotEqual,
/// Greater, GreaterOrEqual, Less, LessOrEqual). Requires both inputs to have
/// matching quantization and adds a Comparison layer.
void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorIndex,
                                       ComparisonOperation comparisonOperation)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Layer name is "<Op>:<subgraph>:<operator>".
    auto layerName = std::string(GetComparisonOperationAsCString(comparisonOperation)) + ":{}:{}";
    std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
    CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerNameFormatted, "Input 0", "Input 1");

    ComparisonDescriptor desc;
    desc.m_Operation = comparisonOperation;
    IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3602
Kevin May7d96b162021-02-03 17:38:41 +00003603armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
3604 unsigned int outputSlot,
3605 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01003606{
3607 ActivationDescriptor activationDesc;
3608 std::string layerName = prevLayer->GetName();
3609
3610 switch(activationType)
3611 {
3612 case tflite::ActivationFunctionType_NONE:
3613 {
3614 // this is a no-op: return previous layer
3615 return prevLayer;
3616 }
3617 case tflite::ActivationFunctionType_RELU:
3618 {
3619 activationDesc.m_Function = ActivationFunction::ReLu;
3620 layerName += ":RELU";
3621 break;
3622 }
3623 case tflite::ActivationFunctionType_RELU6:
3624 {
3625 activationDesc.m_Function = ActivationFunction::BoundedReLu;
3626 activationDesc.m_A = 6.0f;
3627 activationDesc.m_B = 0.0f;
3628 layerName += ":RELU6";
3629 break;
3630 }
3631 case tflite::ActivationFunctionType_TANH:
3632 {
3633 activationDesc.m_Function = ActivationFunction::TanH;
3634 activationDesc.m_A = 1.0f;
3635 activationDesc.m_B = 1.0f;
3636 layerName += ":TANH";
3637 break;
3638 }
3639
3640 // I only put these here as a reminder what others we could support
3641 case tflite::ActivationFunctionType_RELU_N1_TO_1:
3642 case tflite::ActivationFunctionType_SIGN_BIT:
3643 default:
3644 {
3645 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003646 fmt::format("TfLite parser doesn't suppport fused activation: "
3647 "{}/{} {} ",
3648 activationType,
3649 tflite::EnumNameActivationFunctionType(activationType),
3650 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003651
3652 }
3653 }
3654
3655 IConnectableLayer* activationLayer =
3656 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
3657
3658 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
3659 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
3660 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
3661 return activationLayer;
3662}
3663
Kevin May7d96b162021-02-03 17:38:41 +00003664TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char * fileName)
telsoa01c577f2c2018-08-31 09:22:23 +01003665{
3666 if (fileName == nullptr)
3667 {
James Ward58dec6b2020-09-11 17:32:44 +01003668 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003669 CHECK_LOCATION().AsString()));
3670 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01003671 std::error_code errorCode;
3672 fs::path pathToFile(fileName);
3673 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01003674 {
James Ward58dec6b2020-09-11 17:32:44 +01003675 //fmt::format() could not be used here (format error)
3676 std::stringstream msg;
3677 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
3678 << " " << CHECK_LOCATION().AsString();
3679
3680 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01003681 }
3682 std::ifstream file(fileName, std::ios::binary);
3683 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3684 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3685 fileContent.size());
3686}
3687
Kevin May7d96b162021-02-03 17:38:41 +00003688TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
telsoa01c577f2c2018-08-31 09:22:23 +01003689{
3690 if (binaryContent == nullptr)
3691 {
James Ward58dec6b2020-09-11 17:32:44 +01003692 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003693 CHECK_LOCATION().AsString()));
3694 }
3695 flatbuffers::Verifier verifier(binaryContent, len);
3696 if (verifier.VerifyBuffer<tflite::Model>() == false)
3697 {
3698 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003699 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
3700 "flatbuffers format. size:{} {}",
3701 len,
3702 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003703 }
3704 return tflite::UnPackModel(binaryContent);
3705}
3706
Kevin May7d96b162021-02-03 17:38:41 +00003707TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr & model,
3708 size_t subgraphIndex,
3709 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003710{
3711 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3712
Derek Lambertiff05cc52019-04-26 13:05:17 +01003713 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3714 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003715
3716 size_t inputCount = operatorPtr->inputs.size();
mathad01c21025d2021-04-26 10:09:37 +01003717 TensorRawPtrVector result;
telsoa01c577f2c2018-08-31 09:22:23 +01003718 for (size_t i=0; i<inputCount; ++i)
3719 {
mathad01c21025d2021-04-26 10:09:37 +01003720 // If the input location is -1 then assume input is turned off.
3721 if (operatorPtr->inputs[i] == -1)
3722 {
3723 continue;
3724 }
3725 else
3726 {
3727 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
3728 result.push_back(subgraphPtr->tensors[inputId].get());
3729 }
telsoa01c577f2c2018-08-31 09:22:23 +01003730 }
3731 return result;
3732}
3733
Kevin May7d96b162021-02-03 17:38:41 +00003734TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr & model,
3735 size_t subgraphIndex,
3736 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003737{
3738 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3739
Derek Lambertiff05cc52019-04-26 13:05:17 +01003740 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3741 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003742
3743 size_t outputCount = operatorPtr->outputs.size();
3744 TensorRawPtrVector result(outputCount);
3745 for (size_t i=0; i<outputCount; ++i)
3746 {
3747 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
3748 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003749 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003750 }
3751 return result;
3752}
3753
Kevin May7d96b162021-02-03 17:38:41 +00003754TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr & model,
3755 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003756{
3757 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003758 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003759
Derek Lambertiff05cc52019-04-26 13:05:17 +01003760 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003761 TensorIdRawPtrVector result(inputCount);
3762 for (size_t i=0; i<inputCount; ++i)
3763 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003764 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01003765 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003766 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003767 }
3768 return result;
3769}
3770
Kevin May7d96b162021-02-03 17:38:41 +00003771TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr & model,
3772 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003773{
3774 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003775 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003776
Derek Lambertiff05cc52019-04-26 13:05:17 +01003777 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003778 TensorIdRawPtrVector result(outputCount);
3779 for (size_t i=0; i<outputCount; ++i)
3780 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003781 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3782 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003783 }
3784 return result;
3785}
3786
Kevin May7d96b162021-02-03 17:38:41 +00003787std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
3788 size_t subgraphIndex,
3789 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003790{
3791 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003792 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3793 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003794 return operatorPtr->inputs;
3795}
3796
Kevin May7d96b162021-02-03 17:38:41 +00003797std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
3798 size_t subgraphIndex,
3799 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003800{
3801 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003802 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3803 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003804 return operatorPtr->outputs;
3805}
3806
Kevin May7d96b162021-02-03 17:38:41 +00003807void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
3808 size_t operatorIndex,
3809 IConnectableLayer* layer,
Finn Williamsd4fa5452021-03-01 12:31:41 +00003810 const std::vector<unsigned int>& tensorIndexes,
3811 unsigned int startingSlotIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003812{
3813 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003814 ARMNN_ASSERT(layer != nullptr);
Matthew Sloyan81beae32021-07-13 19:46:11 +01003815
Finn Williamsd4fa5452021-03-01 12:31:41 +00003816 if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
telsoa01c577f2c2018-08-31 09:22:23 +01003817 {
3818 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003819 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
3820 " for subgraph:{} operator index:{} {}",
3821 tensorIndexes.size(),
3822 layer->GetNumInputSlots(),
3823 subgraphIndex,
3824 operatorIndex,
3825 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003826 }
3827
Finn Williamsd4fa5452021-03-01 12:31:41 +00003828 for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
telsoa01c577f2c2018-08-31 09:22:23 +01003829 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00003830 unsigned int tensorIndex = tensorIndexes[index];
3831 armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
telsoa01c577f2c2018-08-31 09:22:23 +01003832 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
3833 }
3834}
3835
Kevin May7d96b162021-02-03 17:38:41 +00003836void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
3837 size_t operatorIndex,
3838 IConnectableLayer* layer,
3839 const std::vector<unsigned int>& tensorIndexes)
telsoa01c577f2c2018-08-31 09:22:23 +01003840{
3841 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003842 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003843 if (tensorIndexes.size() != layer->GetNumOutputSlots())
3844 {
3845 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003846 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
3847 " for subgraph:{} operator index:{} {}",
3848 tensorIndexes.size(),
3849 layer->GetNumOutputSlots(),
3850 subgraphIndex,
3851 operatorIndex,
3852 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003853 }
3854
3855 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
3856 {
3857 unsigned int tensorIndex = tensorIndexes[slotIndex];
3858 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
3859 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3860 }
3861}
3862
Kevin May7d96b162021-02-03 17:38:41 +00003863void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003864{
3865 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3866
3867 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
3868 for (auto const & tensorIdAndPtr : inputs)
3869 {
3870 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3871 IConnectableLayer* layer =
3872 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3873
3874 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
3875 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3876
3877 RegisterOutputSlots(subgraphIndex,
3878 VIRTUAL_OPERATOR_ID,
3879 layer,
3880 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3881 }
3882}
3883
Kevin May7d96b162021-02-03 17:38:41 +00003884void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003885{
3886 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3887
3888 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
3889 for (auto const & tensorIdAndPtr : outputs)
3890 {
3891 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3892 IConnectableLayer* layer =
3893 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3894
3895 RegisterInputSlots(subgraphIndex,
3896 VIRTUAL_OPERATOR_ID,
3897 layer,
3898 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3899 }
3900}
3901
Kevin May7d96b162021-02-03 17:38:41 +00003902void TfLiteParserImpl::SetupConstantLayers(size_t subgraphIndex)
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003903{
3904 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3905
Derek Lambertiff05cc52019-04-26 13:05:17 +01003906 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003907 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
3908 {
3909 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
3910 {
3911 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
3912 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
3913 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003914 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003915
Matthew Sloyan81beae32021-07-13 19:46:11 +01003916 if(IsConstTensor(tensorPtr))
3917 {
3918 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
3919 auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003920
Matthew Sloyan81beae32021-07-13 19:46:11 +01003921 std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
3922 IConnectableLayer *layer = m_Network->AddConstantLayer(tensorAndData, layerName.c_str());
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003923
Matthew Sloyan81beae32021-07-13 19:46:11 +01003924 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3925 RegisterOutputSlots(subgraphIndex,
3926 VIRTUAL_OPERATOR_ID,
3927 layer,
3928 { tensorIndex });
3929 }
3930 else
3931 {
3932 throw ParseException(
3933 fmt::format("Invalid Tensor: Tensor should be constant. {}",
3934 CHECK_LOCATION().AsString()));
3935 }
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003936 }
3937 }
3938 }
3939}
3940
// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
// Returns the raw buffer stored at bufferIndex, after validating the index
// against the model via CHECK_BUFFER.
TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
{
    CHECK_BUFFER(model, bufferIndex);
    return model->buffers[bufferIndex].get();
}
3947
// Creates a ConstTensor with element type T from the given buffer and returns
// it together with the SupportedDataStorage that owns the backing data
// produced by CreateConstTensorImpl (keeping it alive for the tensor).
// NOTE(review): CreateConstTensorImpl is defined elsewhere; presumably it
// applies permutationVector to the data when one is supplied - confirm there.
template<typename T>
std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
                                                TfLiteParserImpl::TensorRawPtr tensorPtr,
                                                armnn::TensorInfo& tensorInfo,
                                                armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    auto constData = CreateConstTensorImpl<T>(bufferPtr,
                                              tensorPtr,
                                              tensorInfo,
                                              permutationVector);
    TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
    return std::make_pair(constData.first, std::move(storage));
}
3965
Finn Williamsd4fa5452021-03-01 12:31:41 +00003966bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
3967{
3968 CHECK_TENSOR_PTR(tensorPtr);
mathad01bf7edb62021-04-20 16:12:45 +01003969 bool isConst = true;
3970
3971 auto buffer = GetBuffer(m_Model, tensorPtr->buffer);
3972 if (buffer->data.size() == 0)
3973 {
3974 isConst = false;
3975 }
3976
3977 return isConst;
Finn Williamsd4fa5452021-03-01 12:31:41 +00003978}
3979
Kevin May7d96b162021-02-03 17:38:41 +00003980std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
Finn Williamsd4fa5452021-03-01 12:31:41 +00003981TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
3982 armnn::TensorInfo& tensorInfo,
3983 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01003984{
3985 CHECK_TENSOR_PTR(tensorPtr);
3986 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3987 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3988
Matthew Sloyan81beae32021-07-13 19:46:11 +01003989 // Make sure isConstant flag is set.
3990 tensorInfo.SetConstant();
3991
telsoa01c577f2c2018-08-31 09:22:23 +01003992 switch (tensorInfo.GetDataType())
3993 {
3994 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003995 return CreateConstTensorAndStoreData<float>(bufferPtr,
3996 tensorPtr,
3997 tensorInfo,
3998 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00003999 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00004000 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
4001 tensorPtr,
4002 tensorInfo,
4003 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00004004 case armnn::DataType::QSymmS8:
4005 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
4006 tensorPtr,
4007 tensorInfo,
4008 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00004009 case armnn::DataType::QAsymmS8:
4010 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
4011 tensorPtr,
4012 tensorInfo,
4013 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01004014 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00004015 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
4016 tensorPtr,
4017 tensorInfo,
4018 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01004019 default:
4020 {
4021 std::stringstream errString;
4022 errString << "Unexpected datatype when creating const tensor: "
4023 << armnn::GetDataTypeName(tensorInfo.GetDataType())
4024 << " shape:" << tensorInfo.GetShape()
4025 << CHECK_LOCATION().AsString();
4026 throw ParseException(errString.str());
4027 }
4028 }
4029}
4030
Finn Williamsd4fa5452021-03-01 12:31:41 +00004031armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
4032 armnn::TensorInfo& tensorInfo)
4033{
4034 CHECK_TENSOR_PTR(tensorPtr);
4035 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
4036 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
4037
Matthew Sloyan81beae32021-07-13 19:46:11 +01004038 // Make sure isConstant flag is set.
4039 tensorInfo.SetConstant();
4040
Finn Williamsd4fa5452021-03-01 12:31:41 +00004041 return ConstTensor(tensorInfo, bufferPtr->data.data());
4042}
4043
Kevin May7d96b162021-02-03 17:38:41 +00004044BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
4045 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01004046{
4047 CHECK_SUBGRAPH(m_Model, subgraphId);
4048 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
4049 for (auto const & input : inputs)
4050 {
4051 if (input.second->name == name)
4052 {
4053 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
4054 return std::make_pair(bindingId, ToTensorInfo(input.second));
4055 }
4056 }
4057
4058 std::stringstream bindings;
4059 for (auto const & input : inputs)
4060 {
4061 bindings << "'" << input.second->name << "' ";
4062 }
4063
4064 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004065 fmt::format("No input binding found for subgraph:{} and name:{}. "
4066 "Possible inputs are: [{}] {}",
4067 subgraphId,
4068 name,
4069 bindings.str(),
4070 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004071}
4072
Kevin May7d96b162021-02-03 17:38:41 +00004073BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
4074 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01004075{
4076 CHECK_SUBGRAPH(m_Model, subgraphId);
4077 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00004078 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01004079 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00004080 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01004081 if (output.second->name == name)
4082 {
4083 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00004084 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
4085 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
4086 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01004087 }
4088 }
4089
4090 std::stringstream bindings;
4091 for (auto const & output : outputs)
4092 {
4093 bindings << "'" << output.second->name << "' ";
4094 }
4095
4096 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004097 fmt::format("No output binding found for subgraph:{} and name:{}. "
4098 "Possible outputs are: [{}] {}",
4099 subgraphId,
4100 name,
4101 bindings.str(),
4102 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004103}
4104
// Returns the number of subgraphs contained in the loaded model.
size_t TfLiteParserImpl::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}
4109
Kevin May7d96b162021-02-03 17:38:41 +00004110std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01004111{
4112 CHECK_SUBGRAPH(m_Model, subgraphId);
4113 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
4114 std::vector<std::string> result;
4115 result.reserve(inputs.size());
4116 for (auto const & input : inputs)
4117 {
4118 result.push_back(input.second->name);
4119 }
4120 return result;
4121}
4122
Kevin May7d96b162021-02-03 17:38:41 +00004123std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01004124{
4125 CHECK_SUBGRAPH(m_Model, subgraphId);
4126 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
4127 std::vector<std::string> result;
4128 result.reserve(outputs.size());
4129 for (auto const & output : outputs)
4130 {
4131 result.push_back(output.second->name);
4132 }
4133 return result;
4134}
4135
// Returns the TfLite parser version string (TFLITE_PARSER_VERSION, defined
// in armnnTfLiteParser/Version.hpp at build time).
const std::string TfLiteParserImpl::GetVersion()
{
    return TFLITE_PARSER_VERSION;
}
4140
// Takes ownership of a float buffer; all other typed buffers remain null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
4148
// Takes ownership of a uint8_t buffer; all other typed buffers remain null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
4156
// Takes ownership of an int8_t buffer; all other typed buffers remain null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}
4164
// Takes ownership of an int32_t buffer; all other typed buffers remain null.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
4172
4173} // armnnTfLiteParser