blob: 95e0e0af6eb08b2eb5252e893f3e58e1d89fc131 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kellyc5789ca2020-07-06 19:24:15 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfLiteParser/Version.hpp"
9
Sadik Armagand109a4d2020-07-28 10:42:13 +010010#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +000011#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010012#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000013#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010014#include <armnn/Tensor.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000015#include <armnnUtils/TensorUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010016#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010017#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000018#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010019#include <armnn/utility/NumericCast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010020
21// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000022#include <armnnUtils/Permute.hpp>
Rob Hughes9542f902021-07-14 09:48:54 +010023#include <armnnUtils/Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000024
Sadik Armagan479045b2018-10-01 11:51:37 +010025#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026#include <VerificationHelpers.hpp>
27
28// The generated code based on the Tf Lite schema:
29#include <schema_generated.h>
30
Matteo Martincighe011d202019-11-28 11:35:47 +000031#include <flatbuffers/flexbuffers.h>
32
James Ward58dec6b2020-09-11 17:32:44 +010033#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010034
#include <algorithm>
#include <fstream>
#include <iostream>
#include <limits>
#include <memory>
#include <numeric>
#include <sstream>
41
// Throws armnn::ParseException built from a streamed message, with the
// call-site location (file:line) appended for easier debugging.
#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
    { \
        throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
            << ": " \
            << CHECK_LOCATION().AsString()).str()); \
    }
telsoa01c577f2c2018-08-31 09:22:23 +010048
49using namespace armnn;
50using armnn::CheckLocation;
51namespace armnnTfLiteParser
52{
Kevin May7d96b162021-02-03 17:38:41 +000053
// ITfLiteParser is a thin pImpl facade: every call is forwarded to
// TfLiteParserImpl, keeping the public interface/ABI stable.
ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
    pTfLiteParserImpl(new TfLiteParserImpl(options)) {}

ITfLiteParser::~ITfLiteParser() = default;
58
// Creates a parser on the heap; the caller owns the pointer and must release
// it via ITfLiteParser::Destroy (see Create() for the smart-pointer variant).
ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
{
    return new ITfLiteParser(options);
}
63
64ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
65{
66 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
67}
68
// Deletes a parser previously obtained from CreateRaw()/Create().
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
73
// Loads a .tflite flatbuffer from disk and converts it to an armnn::INetwork.
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
}
78
// Converts an in-memory .tflite flatbuffer blob to an armnn::INetwork.
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
}
83
// Returns the binding information (binding id + TensorInfo) for the named
// input tensor of the given subgraph.
BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
}
89
// Returns the binding information (binding id + TensorInfo) for the named
// output tensor of the given subgraph.
BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                            const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
}
95
// Returns the number of subgraphs in the parsed model.
size_t ITfLiteParser::GetSubgraphCount() const
{
    return pTfLiteParserImpl->GetSubgraphCount();
}
100
// Returns the names of all input tensors of the given subgraph.
std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
}
105
// Returns the names of all output tensors of the given subgraph.
std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
}
110
telsoa01c577f2c2018-08-31 09:22:23 +0100111namespace
112{
jimfly01c25411c2018-11-14 17:47:22 +0000113
// Sentinel operator index accepted by CheckModel() when a check is not tied
// to a concrete operator in the subgraph.
const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
115
Kevin May7d96b162021-02-03 17:38:41 +0000116void CheckSubgraph(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100117 size_t subgraphIndex,
118 const CheckLocation & location)
119{
120 if (model.get() == nullptr)
121 {
122 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100123 fmt::format("{} was called with invalid (null) model. "
124 "Possible reason is that the model is not yet loaded and Unpack(ed). "
125 "subgraph:{} at {}",
126 location.m_Function,
127 subgraphIndex,
128 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100129 }
130 else if (subgraphIndex >= model->subgraphs.size())
131 {
132 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100133 fmt::format("{} was called with an invalid subgraph index. "
134 "subgraph:{} at {}",
135 location.m_Function,
136 subgraphIndex,
137 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100138 }
139}
140
// Convenience wrapper that captures the caller's source location.
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
143
Kevin May7d96b162021-02-03 17:38:41 +0000144void CheckModel(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100145 size_t subgraphIndex,
146 size_t operatorIndex,
147 const CheckLocation & location)
148{
149 if (model.get() == nullptr)
150 {
151 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100152 fmt::format("{} was called with invalid (null) model. "
153 "Possible reason is that the model is not yet loaded and Unpack(ed). "
154 "subgraph:{} operator:{} at {}",
155 location.m_Function,
156 subgraphIndex,
157 operatorIndex,
158 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100159 }
160 else if (subgraphIndex >= model->subgraphs.size())
161 {
162 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100163 fmt::format("{} was called with an invalid subgraph index. "
164 "subgraph:{} operator:{} at {}",
165 location.m_Function,
166 subgraphIndex,
167 operatorIndex,
168 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100169 }
170 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
171 operatorIndex != VIRTUAL_OPERATOR_ID)
172 {
173 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100174 fmt::format("{} was called with an invalid operator index. "
175 "subgraph:{} operator:{} at {}",
176 location.m_Function,
177 subgraphIndex,
178 operatorIndex,
179 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100180 }
181}
182
// Convenience wrapper that captures the caller's source location.
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
185
Kevin May7d96b162021-02-03 17:38:41 +0000186void CheckTensor(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100187 size_t subgraphIndex,
188 size_t tensorIndex,
189 const CheckLocation & location)
190{
191 // not checking model, because I assume CHECK_MODEL already run
192 // and checked that. An assert would do.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100193 ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
telsoa01c577f2c2018-08-31 09:22:23 +0100194
195 // also subgraph index should be checked by CHECK_MODEL so
196 // I only add an assert here
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100197 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
telsoa01c577f2c2018-08-31 09:22:23 +0100198
199 // the tensor index is the only one to check here
200 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
201 {
202 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100203 fmt::format("{} was called with an invalid tensor index. "
204 "subgraph:{} tensor:{} at {}",
205 location.m_Function,
206 subgraphIndex,
207 tensorIndex,
208 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100209 }
210}
211
// Convenience wrapper that captures the caller's source location.
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
214
Kevin May7d96b162021-02-03 17:38:41 +0000215void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100216 const CheckLocation & location)
217{
218 if (rawPtr == nullptr)
219 {
220 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100221 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100222 }
223}
224
// Convenience wrapper that captures the caller's source location.
#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
227
Kevin May7d96b162021-02-03 17:38:41 +0000228void CheckBuffer(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100229 size_t bufferIndex,
230 const CheckLocation & location)
231{
232 if (model.get() == nullptr)
233 {
234 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100235 fmt::format("{} was called with invalid (null) model. "
236 "Possible reason is that the model is not yet loaded and Unpack(ed). "
237 "buffer:{} at {}",
238 location.m_Function,
239 bufferIndex,
240 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100241 }
242 else if (bufferIndex >= model->buffers.size())
243 {
244 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100245 fmt::format("{} was called with an invalid buffer index. "
246 "buffer index:{} at {}",
247 location.m_Function,
248 bufferIndex,
249 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100250 }
251 else if (model->buffers[bufferIndex].get() == nullptr)
252 {
253 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100254 fmt::format("The buffer #{} is null. {}",
255 bufferIndex,
256 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100257 }
258}
259
// Convenience wrapper that captures the caller's source location.
#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
262
Kevin May7d96b162021-02-03 17:38:41 +0000263void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100264 const armnn::TensorInfo & tensorInfo,
265 uint32_t bufferId,
266 const CheckLocation & location)
267{
268 if (bufferPtr == nullptr)
269 {
270 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100271 fmt::format("BufferPtr is null for buffer:{}. {}",
272 bufferId,
273 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100274 }
275 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
276 tensorInfo.GetNumBytes() > bufferPtr->data.size())
277 {
278 std::stringstream ss;
279 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
280 << "For tensor: " << tensorInfo.GetShape()
281 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
282 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
283 throw ParseException(ss.str());
284 }
285}
286
// Convenience wrapper that captures the caller's source location.
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
289
290bool IsActivationSupported(tflite::ActivationFunctionType activationType)
291{
292 switch(activationType)
293 {
294 case tflite::ActivationFunctionType_NONE:
295 case tflite::ActivationFunctionType_RELU:
296 case tflite::ActivationFunctionType_RELU6:
297 case tflite::ActivationFunctionType_TANH:
298 {
299 return true;
300 }
301 default:
302 {
303 return false;
304 }
305 }
306}
307
// Throws ParseException when OPTION carries a fused activation the parser
// cannot lower. Implemented as a macro so that __func__ and
// CHECK_LOCATION() report the calling parse function, not a helper.
// BUGFIX: corrected the typo "suppport" in the user-facing error message.
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                fmt::format("TfLite parser doesn't support fused activation: " \
                            "{}/{} in {} subgraph:{} operator:{} at {}", \
                            OPTION->fused_activation_function, \
                            tflite::EnumNameActivationFunctionType(\
                                OPTION->fused_activation_function), \
                            __func__, \
                            SUBGRAPH_INDEX, \
                            OPERATOR_INDEX, \
                            CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
324
325
326std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
327{
328 std::vector<unsigned int> result;
329 result.reserve(in.size());
330 for (auto & i : in)
331 {
mathad01c21025d2021-04-26 10:09:37 +0100332 // If the location of the input data is -1 then the input should be ignored.
333 if (i == -1)
334 {
335 continue;
336 }
telsoa01c577f2c2018-08-31 09:22:23 +0100337 result.push_back(CHECKED_NON_NEGATIVE(i));
338 }
339 return result;
340}
341
342void CalcPadding(uint32_t inputSize,
343 uint32_t filterSize,
344 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100345 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100346 uint32_t& paddingFront,
347 uint32_t& paddingBack,
348 tflite::Padding padding)
349{
350 paddingFront = 0;
351 paddingBack = 0;
352 if (padding == tflite::Padding_SAME)
353 {
354 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100355 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
356 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100357 if (temp > inputSize)
358 {
359 paddingFront = (temp - inputSize) / 2;
360 paddingBack = (temp - inputSize) - paddingFront;
361 }
362 }
363}
364
Kevin May7d96b162021-02-03 17:38:41 +0000365armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Finn Williamsb49ed182021-06-29 15:50:08 +0100366 const std::vector<unsigned int>& shape,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100367 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100368{
369 armnn::DataType type;
370 CHECK_TENSOR_PTR(tensorPtr);
371
372 switch (tensorPtr->type)
373 {
374 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000375 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100376 break;
377 case tflite::TensorType_FLOAT32:
378 type = armnn::DataType::Float32;
379 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000380 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000381 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000382 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000383 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000384 type = armnn::DataType::QAsymmS8;
385 }
386 else
387 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000388 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000389 type = armnn::DataType::QSymmS8;
390 }
Finn Williamsed66d142019-12-06 09:55:55 +0000391 break;
392 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000393 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000394 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100395 case tflite::TensorType_INT32:
396 type = armnn::DataType::Signed32;
397 break;
Inki Daed4619e22020-09-10 15:33:54 +0900398 case tflite::TensorType_INT64:
399 type = armnn::DataType::Signed64;
400 break;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100401 case tflite::TensorType_BOOL:
402 type = armnn::DataType::Boolean;
403 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100404 default:
405 {
406 CheckLocation location = CHECK_LOCATION();
407 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100408 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
409 tensorPtr->type,
410 tflite::EnumNameTensorType(tensorPtr->type),
411 tensorPtr->name,
412 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100413 }
414 }
Finn Williamsb49ed182021-06-29 15:50:08 +0100415 TensorShape tensorShape;
416
417 std::vector<unsigned int> safeShape = shape;
418 if (shape.size() == 0)
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100419 {
420 safeShape.push_back(1);
Finn Williamsb49ed182021-06-29 15:50:08 +0100421 }
422
423 if (!outputTensor)
424 {
425 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()), safeShape.data());
426 }
427 else
428 {
429 unsigned long shapeSignatureSize = tensorPtr->shape_signature.size();
430
431 // If a shape signature exists we will use that to infer dynamic tensors
432 if (shapeSignatureSize != 0)
Sadik Armagand109a4d2020-07-28 10:42:13 +0100433 {
Finn Williamsb49ed182021-06-29 15:50:08 +0100434 // If the shape is incompatible with the shape signature override the shape
435 if (shapeSignatureSize != shape.size())
436 {
437 safeShape = {};
438
439 for (unsigned int i = 0; i < shapeSignatureSize; ++i)
440 {
441 unsigned int dim = tensorPtr->shape_signature[i] > -1 ?
442 static_cast<unsigned int>(tensorPtr->shape_signature[i]) : 0;
443 safeShape.push_back(dim);
444 }
445 }
446
447 bool dimMask[tensorPtr->shape_signature.size()];
448 for (unsigned int i = 0; i < tensorPtr->shape_signature.size(); ++i)
449 {
450 dimMask[i] = tensorPtr->shape_signature[i] == -1 ? false : true;
451 }
452 tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask);
453 }
454 // If there is no shape signature treat the tensor as dynamic if the shape has a size of zero
455 else if (shape.size() == 0)
456 {
457 tensorShape = TensorShape(1, false);
458 }
459 else
460 {
461 tensorShape = TensorShape(armnn::numeric_cast<unsigned int>(shape.size()), shape.data());
Sadik Armagand109a4d2020-07-28 10:42:13 +0100462 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100463 }
464
Keith Davisd305e1a2020-01-22 11:57:54 +0000465 float quantizationScale = 0.0f;
466 int32_t quantizationOffset = 0;
467
468 if (tensorPtr->quantization.get())
469 {
470 if (tensorPtr->quantization->scale.size() <= 1)
471 {
472 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
473 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
474
475 if (tensorPtr->quantization->scale.size() == 1)
476 {
477 quantizationScale = tensorPtr->quantization->scale[0];
478 }
479 if (tensorPtr->quantization->zero_point.size() == 1)
480 {
481 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000482 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100483 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000484 }
485
Sadik Armagand109a4d2020-07-28 10:42:13 +0100486 armnn::TensorInfo result(tensorShape,
487 type,
488 quantizationScale,
489 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000490 return result;
491 }
492 else
493 {
494 std::vector<float> quantizationScales;
495 std::vector<int32_t> quantizationOffsets;
496
497 // Scale
498 std::copy(tensorPtr->quantization->scale.begin(),
499 tensorPtr->quantization->scale.end(),
500 std::back_inserter(quantizationScales));
501
Keith Davis0c2eeac2020-02-11 16:51:50 +0000502 // QSymmS8 Per-axis
Sadik Armagand109a4d2020-07-28 10:42:13 +0100503 armnn::TensorInfo result(tensorShape,
504 type,
505 quantizationScales,
Jan Eilers7612bd62021-04-06 17:29:03 +0100506 armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
Keith Davisd305e1a2020-01-22 11:57:54 +0000507 return result;
508 }
509 }
510 else
511 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100512 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000513 type,
514 quantizationScale,
515 quantizationOffset);
516 return result;
517 }
telsoa01c577f2c2018-08-31 09:22:23 +0100518}
519
// Convenience overload: derives the shape from the tensor's own 'shape'
// field and builds a non-output (fully specified) TensorInfo.
armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr)
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions);
}
525
// Convenience overload: derives the shape from the tensor's own 'shape'
// field; 'outputTensor' selects the shape_signature-aware (dynamic) path.
armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                               const bool outputTensor)
{
    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions, outputTensor);
}
532
// Copies (and optionally permutes) raw buffer data into freshly allocated
// storage and wraps it in a ConstTensor. Returns both the tensor and the
// owning buffer: the caller must keep the unique_ptr alive for as long as
// the ConstTensor is in use, because ConstTensor does not own its memory.
// When a non-empty permutation vector is supplied, 'tensorInfo' is updated
// in place to the permuted layout.
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
                      TfLiteParserImpl::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    // tensorPtr is only read by the asserts below; silence the unused
    // warning in builds where asserts are compiled out.
    IgnoreUnused(tensorPtr);
    ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    ARMNN_ASSERT_MSG(bufferPtr != nullptr,
                     fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        // Permute updates tensorInfo first, then copies the buffer into the
        // new layout.
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        // Straight copy; buffer size is validated by CHECK_BUFFER_SIZE at
        // the call sites — TODO confirm all callers do so.
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
560
telsoa01c577f2c2018-08-31 09:22:23 +0100561armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
562{
563 // generate the binding id by shifting the tensor id by 8 bit
564 // and add the subgraph id, which allows 256 subgraphs
565 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
566}
567
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000568bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
569{
570 const unsigned int actualSize = actual.GetNumDimensions();
571 if (actualSize != expected.size())
572 {
573 return false;
574 }
575
576 for (unsigned int i = 0u; i < actualSize; i++)
577 {
578 if (expected[i] < 0 ||
579 actual[i] != static_cast<unsigned int>(expected[i]))
580 {
581 return false;
582 }
583 }
584
585 return true;
586}
587
James Conroy05102392020-06-24 15:39:55 +0100588void CheckMatchingQuantization(const TensorInfo& first,
589 const TensorInfo& second,
590 const std::string& descName,
591 std::string const& firstName,
592 std::string const& secondName)
593{
594 if (!first.IsQuantized() ||
595 !second.IsQuantized())
596 {
597 // Not a quantized type, ignore the validation
598 return;
599 }
600
601 DataType firstDataType = first.GetDataType();
602 DataType secondDataType = second.GetDataType();
603
604 if (firstDataType != secondDataType)
605 {
606 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
607 " must be of the same quantized type, " +
608 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
609 secondName + " is " + GetDataTypeName(secondDataType));
610 }
611
612 if (!first.IsTypeSpaceMatch(second))
613 {
614 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
615 " must have the same quantization space, " +
616 firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
617 " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
618 secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
619 " and scale " + std::to_string(second.GetQuantizationScale()));
620 }
621}
622
telsoa01c577f2c2018-08-31 09:22:23 +0100623} // <anonymous>
624
// Builds the dispatch table mapping each TfLite builtin operator code to its
// parse member function. Every slot defaults to ParseUnsupportedOperator, so
// unregistered builtins produce a clear error instead of being skipped.
TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ABS]                     = &TfLiteParserImpl::ParseAbs;
    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParserImpl::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN]                 = &TfLiteParserImpl::ParseArgMin;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX]                 = &TfLiteParserImpl::ParseArgMax;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParserImpl::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParserImpl::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CAST]                    = &TfLiteParserImpl::ParseCast;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParserImpl::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParserImpl::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParserImpl::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE]          = &TfLiteParserImpl::ParseDepthToSpace;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParserImpl::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParserImpl::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParserImpl::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ELU]                     = &TfLiteParserImpl::ParseElu;
    m_ParserFunctions[tflite::BuiltinOperator_EQUAL]                   = &TfLiteParserImpl::ParseEqual;
    m_ParserFunctions[tflite::BuiltinOperator_EXP]                     = &TfLiteParserImpl::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS]             = &TfLiteParserImpl::ParseExpandDims;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParserImpl::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER]                  = &TfLiteParserImpl::ParseGather;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER]                 = &TfLiteParserImpl::ParseGreater;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL]           = &TfLiteParserImpl::ParseGreaterOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH]              = &TfLiteParserImpl::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParserImpl::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LESS]                    = &TfLiteParserImpl::ParseLess;
    m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL]              = &TfLiteParserImpl::ParseLessOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT]             = &TfLiteParserImpl::ParseLogicalNot;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParserImpl::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParserImpl::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParserImpl::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParserImpl::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParserImpl::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParserImpl::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParserImpl::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParserImpl::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL]               = &TfLiteParserImpl::ParseNotEqual;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParserImpl::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_PRELU]                   = &TfLiteParserImpl::ParsePrelu;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParserImpl::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParserImpl::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParserImpl::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX]              = &TfLiteParserImpl::ParseReduceMax;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN]              = &TfLiteParserImpl::ParseReduceMin;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParserImpl::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParserImpl::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_RSQRT]                   = &TfLiteParserImpl::ParseRsqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SHAPE]                   = &TfLiteParserImpl::ParseShape;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParserImpl::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParserImpl::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParserImpl::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParserImpl::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V]                 = &TfLiteParserImpl::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParserImpl::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParserImpl::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParserImpl::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_SUM]                     = &TfLiteParserImpl::ParseSum;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParserImpl::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParserImpl::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParserImpl::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParserImpl::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
}
697
Kevin May7d96b162021-02-03 17:38:41 +0000698void TfLiteParserImpl::ResetParser()
telsoa01c577f2c2018-08-31 09:22:23 +0100699{
700 m_Network = armnn::INetworkPtr(nullptr, nullptr);
701 m_Model = nullptr;
702 m_SubgraphConnections.clear();
703}
704
Kevin May7d96b162021-02-03 17:38:41 +0000705INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
telsoa01c577f2c2018-08-31 09:22:23 +0100706{
707 ResetParser();
708 m_Model = LoadModelFromFile(graphFile);
709 return CreateNetworkFromModel();
710}
711
Kevin May7d96b162021-02-03 17:38:41 +0000712INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
telsoa01c577f2c2018-08-31 09:22:23 +0100713{
714 ResetParser();
715 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
716 return CreateNetworkFromModel();
717}
718
Finn Williamsb49ed182021-06-29 15:50:08 +0100719
720armnn::INetworkPtr TfLiteParserImpl::LoadModel(std::unique_ptr<tflite::ModelT> model)
721{
722 ResetParser();
723 m_Model = std::move(model);
724
725 return CreateNetworkFromModel();
726}
727
Kevin May7d96b162021-02-03 17:38:41 +0000728INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
telsoa01c577f2c2018-08-31 09:22:23 +0100729{
Sadik Armagand109a4d2020-07-28 10:42:13 +0100730
731 using NetworkOptions = std::vector<BackendOptions>;
732 NetworkOptions networkOptions = {};
733 if (m_Options && m_Options.value().m_InferAndValidate)
734 {
735 BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
736 {
737 { "InferAndValidate", true }
738 });
739
740 networkOptions.push_back(shapeInferenceMethodOption);
741 }
742
743 m_Network = INetwork::Create(networkOptions);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100744 ARMNN_ASSERT(m_Model.get() != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +0100745
telsoa01c577f2c2018-08-31 09:22:23 +0100746 if (m_Model->subgraphs.size() != 1)
747 {
748 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100749 fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
750 m_Model->subgraphs.size(),
751 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100752 }
753
754 size_t subgraphIndex = 0;
Colm Donelan6350d272020-06-09 16:56:25 +0100755 size_t operatorIndex = 0;
756 try
telsoa01c577f2c2018-08-31 09:22:23 +0100757 {
Colm Donelan6350d272020-06-09 16:56:25 +0100758 for (SubgraphPtr const& subgraph : m_Model->subgraphs)
telsoa01c577f2c2018-08-31 09:22:23 +0100759 {
Colm Donelan6350d272020-06-09 16:56:25 +0100760 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
761 for (OperatorPtr const& op : subgraph->operators)
telsoa01c577f2c2018-08-31 09:22:23 +0100762 {
Colm Donelan6350d272020-06-09 16:56:25 +0100763 auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];
telsoa01c577f2c2018-08-31 09:22:23 +0100764 auto builtinCode = opCodePtr->builtin_code;
765
766 if (builtinCode > tflite::BuiltinOperator_MAX)
767 {
James Ward58dec6b2020-09-11 17:32:44 +0100768 throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
769 "subgraph:{} operator idx:{}. {}",
770 builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
771 operatorIndex, CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100772 }
773
774 // lookup and call the parser function
Colm Donelan6350d272020-06-09 16:56:25 +0100775 auto& parserFunction = m_ParserFunctions[builtinCode];
telsoa01c577f2c2018-08-31 09:22:23 +0100776 (this->*parserFunction)(subgraphIndex, operatorIndex);
Colm Donelan6350d272020-06-09 16:56:25 +0100777 ++operatorIndex;
telsoa01c577f2c2018-08-31 09:22:23 +0100778 }
telsoa01c577f2c2018-08-31 09:22:23 +0100779
Colm Donelan6350d272020-06-09 16:56:25 +0100780 SetupInputLayers(subgraphIndex);
781 SetupOutputLayers(subgraphIndex);
782 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100783
Colm Donelan6350d272020-06-09 16:56:25 +0100784 ++subgraphIndex;
785 operatorIndex = 0;
telsoa01c577f2c2018-08-31 09:22:23 +0100786 }
telsoa01c577f2c2018-08-31 09:22:23 +0100787 }
Colm Donelan6350d272020-06-09 16:56:25 +0100788 catch (const ParseException& e)
telsoa01c577f2c2018-08-31 09:22:23 +0100789 {
Colm Donelan6350d272020-06-09 16:56:25 +0100790 std::stringstream errorString;
791 errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
792 << subgraphIndex << " error: " << e.what();
793 ARMNN_LOG(error) << errorString.str();
794 std::stringstream errors;
795 errors << errorString.str() << "\n";
telsoa01c577f2c2018-08-31 09:22:23 +0100796 throw ParseException(errors.str());
797 }
798
799 // establish the connections from the layer outputs to the inputs of the subsequent layers
Colm Donelan6350d272020-06-09 16:56:25 +0100800 for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +0100801 {
802 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
803 {
804 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
805 {
806 for (size_t inputSlotIdx = 0;
807 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
808 ++inputSlotIdx)
809 {
810 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
811 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
812 }
813 }
814 }
815 }
816
817 return std::move(m_Network);
818}
819
Kevin May7d96b162021-02-03 17:38:41 +0000820void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
821 size_t tensorIndex,
822 armnn::IOutputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +0100823{
824 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100825 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
826 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100827
828 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
829
830 // assuming there is only one producer for that tensor
831 if (tensorSlots.outputSlot != nullptr)
832 {
James Ward58dec6b2020-09-11 17:32:44 +0100833 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
834 "subgraph:{} tensor:{} {}",
835 subgraphIndex,
836 tensorIndex,
837 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100838 }
839
840 tensorSlots.outputSlot = slot;
841}
842
Kevin May7d96b162021-02-03 17:38:41 +0000843void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
844 size_t tensorIndex,
845 armnn::IInputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +0100846{
847 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100848 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
849 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100850
Finn Williamsd4fa5452021-03-01 12:31:41 +0000851 TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +0100852 tensorSlots.inputSlots.push_back(slot);
853}
854
Kevin May7d96b162021-02-03 17:38:41 +0000855void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100856{
857 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
858
859 // NOTE: By default we presume the custom operator is not supported
Kevin May7d96b162021-02-03 17:38:41 +0000860 auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100861
862 // Identify custom code defined for custom operator
863 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
864 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
865
866 // Find parser function that correspondes to custom code (if any)
867 auto iterator = m_CustomParserFunctions.find(customCode);
868 if (iterator != m_CustomParserFunctions.end())
869 {
870 customParserFunction = iterator->second;
871 }
872
873 // Run parser function
874 (this->*customParserFunction)(subgraphIndex, operatorIndex);
875}
876
void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    // Fallback for operators without a registered parser. By default this
    // throws ParseException; when the m_StandInLayerForUnsupported option is
    // set, a non-executable StandInLayer is inserted instead so the rest of
    // the graph can still be constructed.
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
                fmt::format("Operator not supported. "
                            "subgraph:{} operator:{} "
                            "opcode_index:{} opcode:{} / {} {}",
                            subgraphIndex,
                            operatorIndex,
                            opcodeIndex,
                            opcode,
                            tflite::EnumNameBuiltinOperator(opcode),
                            CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());

    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Output tensor infos are taken straight from the model.
    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
    }

    auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    // Register all tensor slots; actual wiring happens in CreateNetworkFromModel().
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}
925
mathad01b392e982021-04-07 12:07:30 +0100926void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
927{
928 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
929
930 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
931 CHECK_VALID_SIZE(inputs.size(), 1);
932 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
933 CHECK_VALID_SIZE(outputs.size(), 1);
934
935 auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
936
937 IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
938 ARMNN_ASSERT(layer != nullptr);
939
940 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
941 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
942
943 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
944 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
945
946 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
947 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
948}
949
void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    // Converts a TfLite CONV_2D operator into an ArmNN Convolution2dLayer,
    // appending a fused activation layer when the op specifies one.
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // 2 inputs: input + weights; 3 inputs: input + weights + bias.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Translate TfLite SAME/VALID padding into explicit pad values,
    // taking stride and dilation into account.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData,
                                                 Optional<ConstTensor>(biasTensorAndData),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // A fused activation (if any) becomes its own layer; the op's outputs are
    // then registered against that activation layer rather than the convolution.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1028
void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    // Converts a TfLite DEPTHWISE_CONV_2D operator into an ArmNN
    // DepthwiseConvolution2dLayer, appending a fused activation layer when
    // the op specifies one.
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // The multiplier is validated but not stored in the descriptor here.
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    // 2 inputs: input + weights; 3 inputs: input + weights + bias.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Translate TfLite SAME/VALID padding into explicit pad values,
    // taking stride and dilation into account.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // ArmNN uses the same filter tensor layout at TfLite [1, H, W, O] no need for any permutation
    auto filterTensor = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensor,
                                                          Optional<ConstTensor>(biasTensorAndData),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensor,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // A fused activation (if any) becomes its own layer; the op's outputs are
    // then registered against that activation layer.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1105
Kevin May7d96b162021-02-03 17:38:41 +00001106void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsed66d142019-12-06 09:55:55 +00001107{
1108 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1109
1110 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1111 CHECK_VALID_SIZE(inputs.size(), 1);
1112
1113 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1114 CHECK_VALID_SIZE(outputs.size(), 1);
1115
James Ward58dec6b2020-09-11 17:32:44 +01001116 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +00001117
1118 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001119 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +00001120
Sadik Armagand109a4d2020-07-28 10:42:13 +01001121 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Finn Williamsed66d142019-12-06 09:55:55 +00001122 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1123
1124 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1125 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1126
1127 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1128 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1129}
1130
Teresa Charlin3ab85482021-06-08 16:59:29 +01001131void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorIndex)
1132{
1133 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1134
1135 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1136 CHECK_VALID_SIZE(inputs.size(), 2);
1137
1138 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1139 CHECK_VALID_SIZE(outputs.size(), 1);
1140
1141 auto layerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
1142
1143 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1144 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1145
1146 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1147
1148 ReshapeDescriptor reshapeDesc;
Finn Williamsb49ed182021-06-29 15:50:08 +01001149
1150 if (outputTensorInfo.GetShape().AreAllDimensionsSpecified())
1151 {
1152 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1153 }
1154 else
1155 {
1156 int32_t axis = inputs[1]->shape[0];
1157
1158 int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
1159
1160 if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
1161 {
1162 throw ParseException("axis must be in range [0 - (inputDimSize + 1), inputDimSize] inclusive");
1163 }
1164
1165 if(axis < 0)
1166 {
1167 axis = inputDimSize + axis + 1;
1168 }
1169
1170 unsigned int shape[inputDimSize + 1];
1171 unsigned int inputShapeIndex = 0;
1172 for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
1173 {
1174 if (i == static_cast<unsigned int>(axis))
1175 {
1176 shape[i] = 1;
1177 }
1178 else
1179 {
1180 shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
1181 ++inputShapeIndex;
1182 }
1183 }
1184
1185 reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape);
1186 }
Teresa Charlin3ab85482021-06-08 16:59:29 +01001187
1188 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1189 ARMNN_ASSERT(layer != nullptr);
1190 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1191
1192 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1193 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1194
1195 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1196 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1197}
1198
void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
{
    // Converts a TfLite TRANSPOSE operator into an ArmNN TransposeLayer.
    // The optional second input is a constant tensor holding the permutation vector.
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1, 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
    TransposeDescriptor desc;

    if (inputs.size() == 2)
    {
        // Copy the permutation values out of the constant tensor's raw buffer.
        armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
        BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
        auto numPermVecElements = permuteTensorInfo.GetNumElements();
        std::vector<unsigned int> permuteShape(numPermVecElements);
        ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
        PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());

        desc = TransposeDescriptor(permutationVector);
    }
    // With a single input the descriptor keeps its default-constructed permutation.

    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1238
Kevin May7d96b162021-02-03 17:38:41 +00001239void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001240{
1241 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1242
1243 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1244 const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();
1245
1246 TransposeConvolution2dDescriptor desc;
1247 desc.m_BiasEnabled = false;
1248 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1249 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1250 desc.m_DataLayout = armnn::DataLayout::NHWC;
1251
1252 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
David Monahan61683802021-01-12 09:11:07 +00001253 if (inputs.size() == 4)
1254 {
1255 desc.m_BiasEnabled = true;
1256 }
1257 else
1258 {
1259 CHECK_VALID_SIZE(inputs.size(), 3);
1260 }
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001261
1262 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1263 CHECK_VALID_SIZE(outputs.size(), 1);
1264
Colm Donelan0ad3ef12020-07-03 15:54:28 +01001265 if (inputs[0])
1266 {
1267 armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
1268 std::vector<int> output_shape(tensorInfo.GetNumElements());
1269 if (tensorInfo.GetDataType() == DataType::Signed32)
1270 {
1271 ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
1272 }
1273 if (tensorInfo.GetDataType() == DataType::QAsymmU8)
1274 {
1275 for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
1276 {
1277 output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
1278 }
1279 }
1280 // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
1281 for (int dimension : output_shape)
1282 {
1283 desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
1284 }
1285 desc.m_OutputShapeEnabled = true;
1286 }
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001287 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001288 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1289
1290 // TfLite uses NHWC tensors
1291 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1292 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1293
1294 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1295 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1296
1297 CalcPadding(inputHeight,
1298 filterHeight,
1299 desc.m_StrideY,
1300 1, // DilationY
1301 desc.m_PadTop,
1302 desc.m_PadBottom,
1303 options->padding);
1304
1305 CalcPadding(inputWidth,
1306 filterWidth,
1307 desc.m_StrideX,
1308 1, // DilationX
1309 desc.m_PadLeft,
1310 desc.m_PadRight,
1311 options->padding);
1312
Finn Williamsd4fa5452021-03-01 12:31:41 +00001313 auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001314
1315 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01001316 auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001317
David Monahan61683802021-01-12 09:11:07 +00001318 if (desc.m_BiasEnabled)
1319 {
1320 auto biasTensorInfo = ToTensorInfo(inputs[3]);
Finn Williamsd4fa5452021-03-01 12:31:41 +00001321 auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo);
David Monahan61683802021-01-12 09:11:07 +00001322 layer = m_Network->AddTransposeConvolution2dLayer(desc,
Finn Williamsd4fa5452021-03-01 12:31:41 +00001323 filterTensorAndData,
1324 biasConstTensor,
David Monahan61683802021-01-12 09:11:07 +00001325 layerName.c_str());
1326 }
1327 else
1328 {
1329 layer = m_Network->AddTransposeConvolution2dLayer(desc,
Finn Williamsd4fa5452021-03-01 12:31:41 +00001330 filterTensorAndData,
David Monahan61683802021-01-12 09:11:07 +00001331 EmptyOptional(),
1332 layerName.c_str());
1333 }
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001334
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001335 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001336
Sadik Armagand109a4d2020-07-28 10:42:13 +01001337 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001338 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1339
1340 // only the tensors for the inputs are relevant, exclude the const (filter) tensor
1341 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001342 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001343
1344 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1345 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1346}
1347
// Handler for the AVERAGE_POOL_2D operator: delegates to the shared pooling
// parser with PoolingAlgorithm::Average selecting the pooling type.
void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1352
Kevin May7d96b162021-02-03 17:38:41 +00001353void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001354{
1355 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1356
1357 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1358 CHECK_VALID_SIZE(inputs.size(), 3);
1359
1360 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1361 CHECK_VALID_SIZE(outputs.size(), 1);
1362
1363 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1364 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1365
1366 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1367 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1368
1369 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1370 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1371
1372 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1373 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1374
1375 size_t step = 2;
1376 std::vector<std::pair<unsigned int, unsigned int>> crops;
1377 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1378 {
1379 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1380 }
1381
1382 armnn::BatchToSpaceNdDescriptor desc;
1383 desc.m_BlockShape = blockShape;
1384 desc.m_Crops = crops;
1385 desc.m_DataLayout = armnn::DataLayout::NHWC;
1386
James Ward58dec6b2020-09-11 17:32:44 +01001387 auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001388
James Conroy05102392020-06-24 15:39:55 +01001389 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001390 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001391 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1392
1393 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1394 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001395 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1396
1397 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1398 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1399
1400 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1401 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1402}
1403
Kevin May7d96b162021-02-03 17:38:41 +00001404void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001405{
1406 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1407
1408 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1409 CHECK_VALID_SIZE(inputs.size(), 1);
1410
1411 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1412 CHECK_VALID_SIZE(outputs.size(), 1);
1413
1414 L2NormalizationDescriptor desc;
1415 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001416 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001417 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1418
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001419 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001420
Sadik Armagand109a4d2020-07-28 10:42:13 +01001421 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson28c94572019-07-18 10:47:03 +01001422 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1423
1424 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1425 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1426
1427 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1428 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1429}
1430
// Handler for the MAX_POOL_2D operator: delegates to the shared pooling
// parser with PoolingAlgorithm::Max selecting the pooling type.
void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1435
Kevin May7d96b162021-02-03 17:38:41 +00001436void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001437{
1438 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1439
1440 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1441 CHECK_VALID_SIZE(inputs.size(), 2);
1442
1443 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1444 CHECK_VALID_SIZE(outputs.size(), 1);
1445
James Ward58dec6b2020-09-11 17:32:44 +01001446 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001447
1448 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1449 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1450 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001451
Sadik Armagand109a4d2020-07-28 10:42:13 +01001452 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001453 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1454
1455 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1456 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001457 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1458
1459 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001460 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001461
1462 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1463 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1464}
1465
Kevin May7d96b162021-02-03 17:38:41 +00001466void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001467{
1468 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1469
1470 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1471 CHECK_VALID_SIZE(inputs.size(), 2);
1472
1473 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1474 CHECK_VALID_SIZE(outputs.size(), 1);
1475
James Ward58dec6b2020-09-11 17:32:44 +01001476 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001477
1478 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1479 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1480 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001481
Sadik Armagand109a4d2020-07-28 10:42:13 +01001482 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001483 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1484
1485 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1486 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001487 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1488
1489 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001490 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001491
1492 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1493 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1494}
1495
Kevin May7d96b162021-02-03 17:38:41 +00001496void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
1497 size_t operatorIndex,
1498 PoolingAlgorithm algorithm)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001499{
1500 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1501
1502 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1503 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1504
1505 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1506
1507 std::string layerName;
1508
1509 switch (algorithm)
1510 {
1511 case PoolingAlgorithm::Average:
1512 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001513 fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001514 break;
1515 case PoolingAlgorithm::Max:
1516 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001517 fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001518 break;
1519 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001520 ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001521 }
1522
1523 Pooling2dDescriptor desc;
1524
1525 desc.m_PoolType = algorithm;
1526 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1527 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1528 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1529 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1530 desc.m_PaddingMethod = PaddingMethod::Exclude;
1531 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001532 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001533
1534 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1535 CHECK_VALID_SIZE(inputs.size(), 1);
1536 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1537
1538 // assuming input is NHWC
1539 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1540 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1541
Pablo Tellof0bd6832019-04-26 17:58:13 +01001542 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1543 desc.m_PadTop, desc.m_PadBottom, options->padding);
1544 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1545 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001546
1547 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1548 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001549
Sadik Armagand109a4d2020-07-28 10:42:13 +01001550 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001551 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1552
1553 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1554 ARMNN_ASSERT(layer != nullptr);
jimfly01c25411c2018-11-14 17:47:22 +00001555 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001556
1557 // register the input connection slots for the layer, connections are made after all layers have been created
1558 // only the tensors for the inputs are relevant, exclude the const tensors
1559 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001560 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001561
jimfly01c25411c2018-11-14 17:47:22 +00001562 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001563 // register the output connection slots for the layer, connections are made after all layers have been created
1564 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1565 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1566}
1567
Kevin May7d96b162021-02-03 17:38:41 +00001568void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
josh minorba424d22019-11-13 10:55:17 -06001569{
1570 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1571
1572 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1573 CHECK_VALID_SIZE(inputs.size(), 3);
1574 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1575 CHECK_VALID_SIZE(outputs.size(), 1);
1576
1577 SliceDescriptor desc;
1578
1579 // set begin tensor info for slice descriptor
1580 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1581 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1582
1583 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1584 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1585
1586 // set size tensor info for slice descriptor
1587 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1588 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1589
1590 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1591 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1592 desc = SliceDescriptor(begin, size);
1593
James Ward58dec6b2020-09-11 17:32:44 +01001594 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06001595
James Conroy05102392020-06-24 15:39:55 +01001596 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001597 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001598 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1599
1600 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
josh minorba424d22019-11-13 10:55:17 -06001601 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1602
1603 // register the input connection slots for the layer, connections are made after all layers have been created
1604 // only the tensors for the inputs are relevant, exclude the const tensors
1605 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1606 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1607
1608 // register the output connection slots for the layer, connections are made after all layers have been created
1609 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1610 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1611}
1612
Kevin May7d96b162021-02-03 17:38:41 +00001613void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001614{
1615 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1616 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1617 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1618
1619 SoftmaxDescriptor desc;
1620 desc.m_Beta = options->beta;
1621
1622 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1623 CHECK_VALID_SIZE(inputs.size(), 1);
1624 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1625 CHECK_VALID_SIZE(outputs.size(), 1);
1626
James Ward58dec6b2020-09-11 17:32:44 +01001627 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001628 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1629
Sadik Armagand109a4d2020-07-28 10:42:13 +01001630 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
telsoa01c577f2c2018-08-31 09:22:23 +01001631 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1632
1633 // register the input connection slots for the layer, connections are made after all layers have been created
1634 // only the tensors for the inputs are relevant, exclude the const tensors
1635 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1636 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1637
1638 // register the output connection slots for the layer, connections are made after all layers have been created
1639 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1640 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1641}
1642
Kevin May7d96b162021-02-03 17:38:41 +00001643void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001644{
1645 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1646
1647 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1648 CHECK_VALID_SIZE(inputs.size(), 3);
1649
1650 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1651 CHECK_VALID_SIZE(outputs.size(), 1);
1652
1653 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1654 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1655
1656 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1657 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1658
1659 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1660 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1661
1662 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1663 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1664
1665 size_t step = 2;
1666 std::vector<std::pair<unsigned int, unsigned int>> padList;
1667 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1668 {
1669 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1670 }
1671
1672 armnn::SpaceToBatchNdDescriptor desc;
1673 desc.m_BlockShape = blockShape;
1674 desc.m_PadList = padList;
1675 desc.m_DataLayout = armnn::DataLayout::NHWC;
1676
James Ward58dec6b2020-09-11 17:32:44 +01001677 auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001678
James Conroy05102392020-06-24 15:39:55 +01001679 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001680 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001681 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1682
1683 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1684 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001685 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1686
1687 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1688 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1689
1690 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1691 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1692}
1693
Teresa Charlin3ab85482021-06-08 16:59:29 +01001694armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
Kevin May7d96b162021-02-03 17:38:41 +00001695 const armnn::TensorInfo & inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01001696{
Teresa Charlin3ab85482021-06-08 16:59:29 +01001697 CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
telsoa01c577f2c2018-08-31 09:22:23 +01001698 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1699
1700 if (inputTensorInfo.GetNumDimensions() > 4)
1701 {
1702 std::stringstream ss;
1703 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1704 << " shape:" << inputTensorInfo.GetShape() << " "
1705 << CHECK_LOCATION().AsString();
1706 throw ParseException(ss.str());
1707 }
1708
1709 if (squeezeDims.empty())
1710 {
1711 squeezeDims.assign(dimensionSequence,
1712 dimensionSequence+inputTensorInfo.GetNumDimensions());
1713 }
1714
1715 std::vector<uint32_t> outputDims;
1716 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1717 {
1718 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1719 auto currentDimension = inputTensorInfo.GetShape()[i];
1720 if (skipSqueeze || currentDimension != 1)
1721 {
1722 outputDims.push_back(currentDimension);
1723 }
1724 }
1725
1726 if (outputDims.size() > 4)
1727 {
1728 std::stringstream ss;
1729 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1730 << " shape:" << inputTensorInfo.GetShape() << " "
1731 << CHECK_LOCATION().AsString();
1732 throw ParseException(ss.str());
1733 }
1734
1735 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1736 outputDims.data());
1737
1738 // we need to preserve the tensor type and the quantization data as well
1739 TensorInfo outTensorInfo = inputTensorInfo;
1740 outTensorInfo.SetShape(outShape);
1741
1742 return outTensorInfo;
1743}
1744
Keith Davis0176fd82021-06-01 17:36:32 +01001745void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
1746{
1747 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1748
1749 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1750 CHECK_VALID_SIZE(inputs.size(), 1);
1751 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1752 CHECK_VALID_SIZE(outputs.size(), 1);
1753
1754 auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
1755
1756 IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
1757 ARMNN_ASSERT(layer != nullptr);
1758
1759
1760 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1761 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1762
1763 // Check if output tensor type is Signed32 or Signed64
1764 if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
1765 outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
1766 {
1767 throw ParseException(
1768 fmt::format(
1769 "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
1770 CHECK_LOCATION().AsString()));
1771 }
1772
1773 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1774 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1775
1776 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1777 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1778}
1779
Kevin May7d96b162021-02-03 17:38:41 +00001780void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001781{
1782 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1783
1784 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1785 CHECK_VALID_SIZE(inputs.size(), 1);
1786
1787 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1788 CHECK_VALID_SIZE(outputs.size(), 1);
1789
1790 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1791 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01001792 auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001793
1794 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001795
1796 std::vector<uint32_t> squeezeDim;
1797 // A single negative dim index is interpreted as a negative index in python
1798 // Meaning the index will be the shape size plus the negative index value
1799 if (options->squeeze_dims.size() == 1 && options->squeeze_dims[0] < 0)
1800 {
1801 int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
1802 squeezeDim.push_back(static_cast<uint32_t>(dim));
1803 }
1804 else
1805 {
1806 squeezeDim = AsUnsignedVector(options->squeeze_dims);
1807 }
1808
1809 armnn::TensorInfo outputTensorInfo = TfLiteParserImpl::OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);
1810
James Conroy05102392020-06-24 15:39:55 +01001811 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
telsoa01c577f2c2018-08-31 09:22:23 +01001812
1813 ReshapeDescriptor reshapeDesc;
1814 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1815
telsoa01c577f2c2018-08-31 09:22:23 +01001816 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001817 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001818 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1819
1820 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1821 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1822
1823 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1824 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1825}
1826
Kevin May7d96b162021-02-03 17:38:41 +00001827void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001828{
1829 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1830
1831 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1832 CHECK_VALID_SIZE(inputs.size(), 4);
1833
1834 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1835 CHECK_VALID_SIZE(outputs.size(), 1);
1836
1837 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1838 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1839
1840 StridedSliceDescriptor desc;
1841 desc.m_BeginMask = options->begin_mask;
1842 desc.m_EllipsisMask = options->ellipsis_mask;
1843 desc.m_EndMask = options->end_mask;
1844 desc.m_NewAxisMask = options->new_axis_mask;
1845 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1846 desc.m_DataLayout = armnn::DataLayout::NHWC;
1847
1848 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1849 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1850
1851 std::vector<int> begin(beginTensorInfo.GetNumElements());
1852 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1853
1854 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1855 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1856
1857 std::vector<int> end(endTensorInfo.GetNumElements());
1858 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1859
1860 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1861 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1862
1863 std::vector<int> stride(strideTensorInfo.GetNumElements());
1864 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1865
1866 desc.m_Begin = begin;
1867 desc.m_End = end;
1868 desc.m_Stride = stride;
1869
James Ward58dec6b2020-09-11 17:32:44 +01001870 auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001871 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001872 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001873
Sadik Armagand109a4d2020-07-28 10:42:13 +01001874 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001875 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1876
1877 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1878 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1879
1880 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1881 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1882}
1883
Kevin May7d96b162021-02-03 17:38:41 +00001884void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001885{
1886 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1887
1888 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1889 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1890
1891 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1892 CHECK_VALID_SIZE(inputs.size(), 2);
1893
1894 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1895 CHECK_VALID_SIZE(outputs.size(), 1);
1896
1897 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1898 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1899
James Ward58dec6b2020-09-11 17:32:44 +01001900 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001901 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001902 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001903
Sadik Armagand109a4d2020-07-28 10:42:13 +01001904 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001905 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1906
1907 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001908 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001909
1910 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1911
1912 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1913 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1914}
1915
Kevin May7d96b162021-02-03 17:38:41 +00001916void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301917{
1918 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1919
1920 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1921 const auto * options = operatorPtr->builtin_options.AsDivOptions();
1922
1923 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1924 CHECK_VALID_SIZE(inputs.size(), 2);
1925
1926 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1927 CHECK_VALID_SIZE(outputs.size(), 1);
1928
1929 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1930 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1931
James Ward58dec6b2020-09-11 17:32:44 +01001932 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301933 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001934 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301935
Sadik Armagand109a4d2020-07-28 10:42:13 +01001936 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301937 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1938
1939 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001940 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301941 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1942
1943 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1944 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1945}
1946
Kevin May7d96b162021-02-03 17:38:41 +00001947void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001948{
1949 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1950
1951 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1952 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1953
1954 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1955 CHECK_VALID_SIZE(inputs.size(), 2);
1956
1957 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1958 CHECK_VALID_SIZE(outputs.size(), 1);
1959
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001960 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1961 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1962
James Ward58dec6b2020-09-11 17:32:44 +01001963 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001964 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001965 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001966
Sadik Armagand109a4d2020-07-28 10:42:13 +01001967 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001968 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1969
1970 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001971 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001972 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1973
1974 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1975 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1976}
1977
Kevin May7d96b162021-02-03 17:38:41 +00001978void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001979{
1980 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1981
1982 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1983 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1984
1985 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1986 CHECK_VALID_SIZE(inputs.size(), 2);
1987
1988 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1989 CHECK_VALID_SIZE(outputs.size(), 1);
1990
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001991 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1992 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1993
James Ward58dec6b2020-09-11 17:32:44 +01001994 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001995 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001996 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001997
Sadik Armagand109a4d2020-07-28 10:42:13 +01001998 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001999 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2000
2001 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01002002 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02002003 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
2004
2005 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2006 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2007}
2008
Kevin May7d96b162021-02-03 17:38:41 +00002009void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002010{
2011 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2012
2013 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2014
2015 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2016 CHECK_VALID_SIZE(outputs.size(), 1);
2017
2018 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
2019 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2020
2021 armnn::MeanDescriptor desc;
2022 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
2023 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
2024 desc.m_Axis = axis;
2025
2026 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002027 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002028
2029 desc.m_KeepDims =
2030 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
2031 true : false;
2032
James Ward58dec6b2020-09-11 17:32:44 +01002033 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002034 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002035 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02002036
2037 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2038
2039 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2040 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2041
2042 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2043 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2044}
2045
Kevin May7d96b162021-02-03 17:38:41 +00002046void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002047{
2048 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2049
Kevin May7d96b162021-02-03 17:38:41 +00002050 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002051
Kevin May7d96b162021-02-03 17:38:41 +00002052 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002053 CHECK_VALID_SIZE(outputs.size(), 1);
2054
Narumol Prangnawarat8719d222020-11-27 16:57:56 +00002055 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2056
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002057 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
2058 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2059
2060 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
2061 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
2062
2063 size_t step = 2;
2064 armnn::PadDescriptor desc;
Narumol Prangnawarat8719d222020-11-27 16:57:56 +00002065 if (inputTensorInfo.IsQuantized())
2066 {
2067 desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
2068 }
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002069 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
2070 {
2071 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
2072 }
2073
James Ward58dec6b2020-09-11 17:32:44 +01002074 auto layerName = fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002075 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002076
2077 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
2078 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002079 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2080
2081 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2082 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2083
2084 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2085 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2086}
2087
/// Converts a TfLite PRELU operator into an ArmNN Prelu layer.
/// Input 0 is the data tensor; input 1 is the alpha (slope) tensor.
/// A constant alpha is materialised as a dedicated Constant layer wired
/// straight into the Prelu layer's second input slot.
void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo alphaTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    if (IsConstTensor(inputs[1]))
    {
        // Constant alpha: only input slot 0 (the data tensor) is registered for
        // deferred connection; the alpha is connected immediately below.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
        RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);

        // Build a Constant layer from the alpha tensor data and wire it directly
        // into the Prelu layer's second input slot.
        auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo);
        std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
        IConnectableLayer* constLayer =
            m_Network->AddConstantLayer(alphaTensorAndData, constLayerName.c_str());
        ARMNN_ASSERT(constLayer != nullptr);

        constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
        constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
        // VIRTUAL_OPERATOR_ID: the constant layer does not correspond to a real
        // TfLite operator in this subgraph.
        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            constLayer,
                            { inputTensorIndexes[1] });
    }
    else
    {
        // Non-constant alpha: both inputs are registered for deferred connection.
        auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
        RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2137
Kevin May7d96b162021-02-03 17:38:41 +00002138void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00002139{
2140 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2141
2142 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2143 CHECK_VALID_SIZE(inputs.size(), 1);
2144
2145 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2146 CHECK_VALID_SIZE(outputs.size(), 1);
2147
James Ward58dec6b2020-09-11 17:32:44 +01002148 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002149
2150 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002151 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002152
Sadik Armagand109a4d2020-07-28 10:42:13 +01002153 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002154 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2155
2156 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2157 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2158
2159 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2160 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2161}
Finn Williamsc42c3842019-01-22 14:18:11 +00002162
Kevin May7d96b162021-02-03 17:38:41 +00002163void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01002164{
Finn Williamsc42c3842019-01-22 14:18:11 +00002165 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01002166}
2167
Kevin May7d96b162021-02-03 17:38:41 +00002168void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01002169{
Finn Williamsc42c3842019-01-22 14:18:11 +00002170 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
2171}
Sadik Armagan58f39192018-09-17 14:14:39 +01002172
Kevin May7d96b162021-02-03 17:38:41 +00002173void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan12239e72020-05-27 11:06:17 +01002174{
Jan Eilers2f746b32020-07-28 14:00:06 +01002175 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
Sadik Armagan12239e72020-05-27 11:06:17 +01002176}
2177
Kevin May7d96b162021-02-03 17:38:41 +00002178void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsc42c3842019-01-22 14:18:11 +00002179{
2180 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
2181}
2182
Kevin May7d96b162021-02-03 17:38:41 +00002183void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd99851762019-04-09 09:37:38 +01002184{
2185 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
2186}
2187
Kevin May7d96b162021-02-03 17:38:41 +00002188void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
Matthew Sloyan7515d072020-12-16 12:50:01 +00002189{
2190 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
2191}
2192
Kevin May7d96b162021-02-03 17:38:41 +00002193void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
Jan Eilers2f746b32020-07-28 14:00:06 +01002194{
2195 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
2196}
Finn Williamsc42c3842019-01-22 14:18:11 +00002197
/// Shared handler for all TfLite activation operators (RELU, RELU6, LOGISTIC,
/// TANH, LEAKY_RELU, ELU, HARD_SWISH). Builds an ActivationDescriptor for the
/// requested function, choosing a descriptive layer name and the per-function
/// parameters (m_A / m_B) in the switch below.
void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    // operatorPtr is only dereferenced in the LeakyReLu case; IgnoreUnused
    // silences the unused-variable warning on the other paths.
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    IgnoreUnused(operatorPtr);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Name prefix; the switch appends the specific function and op indices.
    auto layerName = fmt::format("Activation:");
    ActivationDescriptor activationDesc;
    activationDesc.m_Function = activationType;

    switch (activationType)
    {
        case ActivationFunction::ReLu:
        {
            layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::BoundedReLu:
        {
            layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
            // RELU6: clamp to the [0, 6] range via BoundedReLu's bounds.
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        case ActivationFunction::TanH:
        {
            layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        case ActivationFunction::LeakyReLu:
        {
            layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
            // Negative-slope alpha comes from the operator's builtin options.
            const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
            activationDesc.m_A = options->alpha;
            break;
        }
        case ActivationFunction::Elu:
        {
            layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
            activationDesc.m_A = 1.0f;
            break;
        }
        case ActivationFunction::HardSwish:
        {
            layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
                            static_cast<int>(activationType), CHECK_LOCATION().AsString()));
        }
    }

    IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
Kevin May7d96b162021-02-03 17:38:41 +00002280armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
2281 const std::vector<int32_t> & targetDimsIn)
Sadikb94967b2018-09-19 15:30:00 +01002282{
2283 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2284 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2285
2286 if (stretchDim != targetDimsIn.end())
2287 {
2288 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2289 {
2290 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002291 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01002292 }
2293
2294 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002295 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01002296 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2297
2298 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2299 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2300 }
2301
2302 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2303
2304 TensorInfo reshapeInfo = inputTensorInfo;
2305 reshapeInfo.SetShape(outputShape);
2306
2307 return reshapeInfo;
2308}
2309
/// Converts a TfLite RESHAPE operator into an ArmNN Reshape layer.
/// The target shape may arrive either through the operator's builtin options
/// (new_shape) or through a constant second input tensor; builtin options win
/// when both are present and non-empty.
void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
    auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");

    // Extracting new shape for the output
    // There are two ways it can be passed
    //  * First is to define the target shape in the operator built-in options
    //  * Second is to pass it as a second input tensor
    std::vector<int32_t> targetShape;
    bool targetShapeFound = false;
    // Check if built-in options were given
    if (options != nullptr)
    {
        // make sure the parameter is given
        if (options->new_shape.empty() == false)
        {
            targetShape = options->new_shape;
            targetShapeFound = true;
        }
    }

    // If there is no built-in option given or if the built-in new_shape parameter was empty
    if (!targetShapeFound)
    {
        // Check for a second input tensor
        if (inputs.size() > 1 && inputs[1] != nullptr)
        {
            // The shape input must be a constant (non-variable) 1D int32 tensor.
            if (inputs[1]->is_variable)
            {
                ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
            }

            if (inputs[1]->shape.size() != 1)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
            }

            if (inputs[1]->type != tflite::TensorType_INT32)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
            }

            // Extract target shape from input
            auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
            auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
            if (!values)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Reshape operator target shape input buffer data is null");
            }
            for (int i=0; i < inputs[1]->shape[0]; ++i)
            {
                targetShape.push_back(values[i]);
            }
        }
        else
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }
    }

    // Resolve any -1 stretch dimension against the input's element count.
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    // Note: the computed (resolved) shape is set on the output slot, not the
    // shape recorded in the model's output tensor.
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2414
Kevin May7d96b162021-02-03 17:38:41 +00002415void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002416{
Sadik Armagana3b31f02019-12-05 09:08:53 +00002417 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
2418}
2419
Kevin May7d96b162021-02-03 17:38:41 +00002420void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagana3b31f02019-12-05 09:08:53 +00002421{
2422 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
2423}
2424
Kevin May7d96b162021-02-03 17:38:41 +00002425void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
Sadik Armagana3b31f02019-12-05 09:08:53 +00002426{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002427 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2428
2429 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2430 CHECK_VALID_SIZE(inputs.size(), 2);
2431
2432 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2433 CHECK_VALID_SIZE(outputs.size(), 1);
2434
2435 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
2436
2437 // Data for the parsed tensor args (size) must be stored locally.
2438 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2439
2440 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2441 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2442
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002443 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002444 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002445 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002446 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2447 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002448
James Ward58dec6b2020-09-11 17:32:44 +01002449 auto layerName = fmt::format("Resize:");
Sadik Armagana3b31f02019-12-05 09:08:53 +00002450
2451 switch (resizeMethod)
2452 {
2453 case ResizeMethod::Bilinear:
2454 {
James Ward58dec6b2020-09-11 17:32:44 +01002455 layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002456
2457 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2458 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2459
David Monahan4a0c9b92020-05-30 09:48:39 +01002460 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002461 break;
2462 }
2463 case ResizeMethod::NearestNeighbor:
2464 {
James Ward58dec6b2020-09-11 17:32:44 +01002465 layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagana3b31f02019-12-05 09:08:53 +00002466 break;
2467 }
2468 default:
2469 {
2470 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002471 fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
2472 static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
Sadik Armagana3b31f02019-12-05 09:08:53 +00002473 }
2474 }
2475
James Conroy05102392020-06-24 15:39:55 +01002476 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002477 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002478 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2479
2480 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
2481 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002482 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2483
2484 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2485 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2486
2487 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2488 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2489}
2490
Kevin May7d96b162021-02-03 17:38:41 +00002491void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan479045b2018-10-01 11:51:37 +01002492{
2493 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2494
2495 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2496 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
2497
2498 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2499
2500 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2501 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2502 CHECK_VALID_SIZE(outputs.size(), 1);
2503
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002504 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
2505 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01002506
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002507 const unsigned int concatDimInput = static_cast<unsigned int>(
2508 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01002509
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002510 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2511 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01002512
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002513 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01002514
2515 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2516 {
2517 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
2518
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002519 // This set up concatDescriptor view origin
2520 armnnUtils::ProcessConcatInputTensorInfo(
2521 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01002522 }
2523
James Ward58dec6b2020-09-11 17:32:44 +01002524 auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002525 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002526
Jim Flynn906f9462019-05-10 13:55:21 +01002527 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002528 ARMNN_ASSERT(layer != nullptr);
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002529 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01002530
James Conroy05102392020-06-24 15:39:55 +01002531 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002532 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01002533
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002534 // add fused activation layer
2535 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01002536
Sadik Armagan479045b2018-10-01 11:51:37 +01002537 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2538 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2539}
2540
Kevin May7d96b162021-02-03 17:38:41 +00002541void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002542{
2543 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2544
2545 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2546 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2547
2548 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2549
2550 FullyConnectedDescriptor desc;
2551 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002552 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002553
2554 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2555 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2556 CHECK_VALID_SIZE(outputs.size(), 1);
2557
2558 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2559
2560 // Fully Connected Layer accepts two dimensional weights input
2561 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2562 if (weightsDimension != 2)
2563 {
2564 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002565 fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
2566 "Node {}",
2567 weightsDimension,
2568 CHECK_LOCATION().AsString()));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002569 }
2570
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002571 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01002572 auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002573
Finn Williamsd4fa5452021-03-01 12:31:41 +00002574 Optional<ConstTensor> filterOptionalConstTensor;
2575
2576 desc.m_ConstantWeights = IsConstTensor(inputs[1]);
2577
Finn Williamsd4fa5452021-03-01 12:31:41 +00002578 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2579 std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
2580 if (desc.m_ConstantWeights)
2581 {
2582 filterOptionalConstTensor = Optional<ConstTensor>(CreateConstTensorNonPermuted(inputs[1], filterTensorInfo));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002583 }
2584 else
2585 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00002586 // Non const weights will need to be registered as inputs
2587 tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002588 }
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002589
Finn Williamsd4fa5452021-03-01 12:31:41 +00002590 Optional<ConstTensor> biasOptionalConstTensor;
2591 if (inputs.size() == 3)
2592 {
2593 desc.m_BiasEnabled = true;
2594 if (desc.m_ConstantWeights)
2595 {
2596 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
2597 biasOptionalConstTensor = Optional<ConstTensor>(CreateConstTensorNonPermuted(inputs[2], biasTensorInfo));
2598 }
2599 else
2600 {
2601 // Non const biases will need to be registered as inputs
2602 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
2603 }
2604 }
2605
2606 layer = m_Network->AddFullyConnectedLayer(desc,
2607 filterOptionalConstTensor,
2608 biasOptionalConstTensor,
2609 layerName.c_str());
2610
2611 ARMNN_ASSERT(layer != nullptr);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002612 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2613
Finn Williamsd4fa5452021-03-01 12:31:41 +00002614 unsigned int startingSlotIndex = 0;
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002615 if (inputTensorInfo.GetNumDimensions() > 2)
2616 {
2617 // Add reshape to flatten to 2D [batch_size, input_size],
2618 // where "input_size" corresponds to the number of inputs to the layer,
2619 // matching the second dimension of weights,
2620 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2621 std::vector<unsigned int> reshapedDimensions(2);
2622 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2623 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2624
2625 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2626 {
2627 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002628 fmt::format("Failed to deduce input tensor shape from filter size {} {}",
2629 reshapedDimensions[1],
2630 CHECK_LOCATION().AsString()));
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002631 }
2632
2633 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2634 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2635
James Ward58dec6b2020-09-11 17:32:44 +01002636 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Finn Williamsd4fa5452021-03-01 12:31:41 +00002637 armnn::ReshapeDescriptor reshapeDescriptor;
2638 reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
2639 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, layerName.c_str());
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002640
2641 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2642 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2643
2644 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
Finn Williamsd4fa5452021-03-01 12:31:41 +00002645 // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
2646 tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
2647 startingSlotIndex = 1;
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002648 }
Finn Williamsd4fa5452021-03-01 12:31:41 +00002649
2650 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002651
Sadik Armagand109a4d2020-07-28 10:42:13 +01002652 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002653 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2654
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002655 // we need to add the activation layer and fortunately we don't need to care about the data layout
2656 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2657 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002658
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002659 // register the output connection slots for the layer, connections are made after all layers have been created
2660 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2661 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2662}
2663
/// Handles the TFLite_Detection_PostProcess custom operator.
/// Descriptor fields are read from the operator's flexbuffer-encoded custom options;
/// the anchors (input 2) are treated as a constant tensor. The four output shapes
/// are not present in the model, so they are derived from max_detections and
/// max_classes_per_detection and stored in m_OverridenOutputShapes.
/// @throws InvalidArgumentException if the NMS IoU threshold is outside (0, 1].
void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two keys are optional in the flexbuffer map; the descriptor keeps its
    // default value when a key is absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Input 2 holds the anchor boxes; treated as a constant tensor for the layer.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);

    auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
                                                                      layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1 });

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
2740
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002741/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00002742void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002743{
2744 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2745
2746 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2747 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2748 CHECK_VALID_SIZE(outputs.size(), 1);
2749
2750 if (inputs.size() < 1)
2751 {
2752 throw ParseException("Pack must have at least one input.");
2753 }
2754
2755 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2756 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2757
2758 StackDescriptor desc;
2759 desc.m_Axis = static_cast<uint32_t>(options->axis);
2760 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2761
2762 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2763 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2764 desc.m_InputShape = inputTensorInfo.GetShape();
2765
James Ward58dec6b2020-09-11 17:32:44 +01002766 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002767 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2768
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002769 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002770
Sadik Armagand109a4d2020-07-28 10:42:13 +01002771 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002772 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2773
2774 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2775 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2776
2777 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2778 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2779}
2780
Kevin May7d96b162021-02-03 17:38:41 +00002781void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01002782{
2783 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2784
2785 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2786 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2787
2788 // This unpackAxis indicates the axis to unpack
2789 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2790
2791 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2792 CHECK_VALID_SIZE(inputs.size(), 1);
2793
2794 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002795
2796 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2797 {
2798 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002799 fmt::format("The unpack axis: {} cannot be greater than or equal to "
2800 "the number of input dimension {} {}",
2801 unpackAxis,
2802 inputTensorInfo.GetNumDimensions(),
2803 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002804 }
2805
Nina Drozd200e3802019-04-15 09:47:39 +01002806 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2807 // If num is not defined, automatically infer from the length of the dimension axis.
2808 if(unpackNum == 0)
2809 {
2810 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2811 }
2812
2813 // If unpack number cannot be inferred and is still zero, throw ParseException.
2814 if(unpackNum == 0)
2815 {
2816 throw ParseException("Number to unpack must greater than zero.");
2817 }
2818
2819 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2820 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2821
2822 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2823 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2824
2825 // Add current input shape to unpackDimSizes
2826 for (unsigned int i = 0; i < inputDimSize; ++i)
2827 {
2828 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2829 }
2830
2831 if (unpackDimSizes[unpackAxis] != unpackNum)
2832 {
2833 throw ParseException("Number to unpack must be the same as length of the dimension to "
2834 "unpack along.");
2835 }
2836
2837 unpackDimSizes[unpackAxis] /= unpackNum;
2838
2839 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2840 for (unsigned int j = 0; j < unpackNum; ++j)
2841 {
2842 // Set the size of the views.
2843 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2844 {
2845 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2846 }
2847 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2848 }
2849
James Ward58dec6b2020-09-11 17:32:44 +01002850 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01002851 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002852 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01002853
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002854 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2855 unpackDimSizes.data());
2856
Nina Drozd200e3802019-04-15 09:47:39 +01002857 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2858 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2859
Finn Williamsb49ed182021-06-29 15:50:08 +01002860 std::vector<unsigned int> reshapeDims;
2861 for (unsigned int axis = 0; axis < splitOutShape.GetNumDimensions(); ++axis)
2862 {
2863 if (axis != unpackAxis)
2864 {
2865 reshapeDims.push_back(splitOutShape[axis]);
2866 }
2867 }
2868
2869 TensorShape reshapeOutputShape(splitOutShape.GetNumDimensions() -1, reshapeDims.data());
2870
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002871 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2872 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2873 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002874 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01002875 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002876 armnn::ReshapeDescriptor desc;
Finn Williamsb49ed182021-06-29 15:50:08 +01002877 desc.m_TargetShape = reshapeOutputShape;
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002878 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2879
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002880 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2881 outputTensorInfo.GetDataType(),
2882 outputTensorInfo.GetQuantizationScale(),
2883 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002884 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2885
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002886 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002887
2888 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2889 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2890 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2891 }
Nina Drozd200e3802019-04-15 09:47:39 +01002892}
2893
/// Handles the TfLite SPLIT operator.
/// Input 0 is the scalar axis tensor, input 1 is the tensor to split (note the
/// swapped order relative to most operators). The axis may be negative and is
/// wrapped into [0, rank). The input is divided into numSplits equal views along
/// that axis via a Splitter layer.
/// @throws ParseException if numSplits is zero, the axis buffer is missing, the
///         axis is out of range, or the rank exceeds MaxNumOfTensorDimensions.
void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSplitOptions();

    const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);

    // If number of splits cannot be inferred and is zero, throw ParseException.
    if(numSplits == 0)
    {
        throw ParseException("Number to splits must greater than zero.");
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), numSplits);

    // Input 1 is the data tensor, input 0 the (single-element) axis tensor.
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);

    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData[0];

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    // Wrap a (possibly negative) axis into an unsigned dimension index.
    const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        throw ParseException(
            fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
                        inputTensorInfo.GetNumDimensions(),
                        MaxNumOfTensorDimensions,
                        CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    splitterDimSizes[splitDim] /= numSplits;

    SplitterDescriptor splitDesc(numSplits, inputDimSize);
    for (unsigned int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        // Each view starts one split-width further along the split dimension.
        splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

    auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    // Only the data tensor (index 1) is registered; the axis tensor is consumed here.
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
        layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2995
Derek Lambertif0176992020-04-28 13:37:49 +01002996unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
2997{
2998 int numDims = armnn::numeric_cast<int>(numDimsIn);
2999 int v = idx < 0 ? numDims + idx : idx;
3000 ARMNN_ASSERT(v >= 0);
3001 ARMNN_ASSERT(v < numDims);
3002
3003 return static_cast<unsigned int>(v);
3004}
3005
Kevin May7d96b162021-02-03 17:38:41 +00003006void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01003007{
3008 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3009
3010 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Ryan OShea86704732020-05-26 11:41:04 +01003011 const auto * options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01003012
3013 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3014 CHECK_VALID_SIZE(inputs.size(), 3);
3015
3016 auto& inputTensor = inputs[0];
3017 auto& splitsTensor = inputs[1];
3018 auto& axisTensor = inputs[2];
3019
3020 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
3021 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
3022 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
3023 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
3024
3025 // Inputs
3026 auto inputDimSize = inputTensorInfo.GetNumDimensions();
3027 if (inputDimSize > MaxNumOfTensorDimensions)
3028 {
3029 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003030 fmt::format("The number of dimensions: {} for input tensors of the "
3031 "SplitV op cannot be greater than {} {}",
3032 inputTensorInfo.GetNumDimensions(),
3033 MaxNumOfTensorDimensions,
3034 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01003035 }
3036
3037 // Get split axis
3038 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003039 if (axisBufferPtr == nullptr)
3040 {
3041 throw ParseException(
3042 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
3043 CHECK_LOCATION().AsString()));
3044 }
3045
Derek Lambertif0176992020-04-28 13:37:49 +01003046 std::vector<int> axisData(axisTensorInfo.GetNumElements());
3047 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003048 int32_t axis = axisData[0];
3049
3050 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
3051 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
3052 {
3053 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
3054 // E.g. Rank 4 tensor can have axis in range [-4, 3)
3055 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
3056 throw ParseException(
3057 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
3058 axis,
3059 CHECK_LOCATION().AsString()));
3060 }
3061 const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
Derek Lambertif0176992020-04-28 13:37:49 +01003062
Derek Lambertif0176992020-04-28 13:37:49 +01003063 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01003064 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01003065 unsigned int numSplits{0};
3066
3067 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01003068 {
3069 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01003070 }
3071 else
3072 {
Ryan OShea86704732020-05-26 11:41:04 +01003073 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01003074 }
3075
3076 if (numSplits <=0)
3077 {
3078 throw ParseException("SplitV has invalid number of splits");
3079 }
3080
Jan Eilersc0761e92020-06-29 16:48:44 +01003081 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01003082 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01003083 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01003084
Jan Eilersc0761e92020-06-29 16:48:44 +01003085 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01003086 int numInferred{0};
3087 unsigned int inferIdx{0};
3088 int splitSum{0};
3089 for (auto split : splitsData)
3090 {
3091 if (split < 0)
3092 {
3093 numInferred++;
3094 inferIdx = idx;
3095 }
3096 else
3097 {
3098 splitSum += split;
3099 }
3100 idx++;
3101 }
3102 // Check for inferred Axis
3103 if (numInferred == 0)
3104 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003105 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01003106 {
3107 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
3108 }
3109 }
3110 else if (numInferred == 1)
3111 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003112 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01003113 }
3114 else
3115 {
3116 throw ParseException("Cannot infer split size for more than one split");
3117 }
3118
Derek Lambertif0176992020-04-28 13:37:49 +01003119 //Ouput size validation
3120 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3121 CHECK_VALID_SIZE(outputs.size(), numSplits);
3122
3123 // Setup Armnn descriptor
3124 SplitterDescriptor splitDesc(numSplits, inputDimSize);
3125 unsigned int accumSplit = 0;
3126 for (unsigned int j = 0; j < numSplits; ++j)
3127 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003128 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01003129
3130 // Set the size of the views.
3131 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
3132 {
3133 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
3134 if (dimIdx == splitDim)
3135 {
3136 dimSize = splitSize;
3137 }
3138 splitDesc.SetViewSize(j, dimIdx, dimSize);
3139 }
3140
3141 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
3142 accumSplit += splitSize;
3143 }
3144
James Ward58dec6b2020-09-11 17:32:44 +01003145 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01003146 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003147 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01003148
3149 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3150 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3151
3152 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3153 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003154 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01003155 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
3156 }
3157
3158 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3159 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3160}
3161
// Handles the TfLite ARG_MIN operator by delegating to the shared ArgMin/ArgMax path.
void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
}

// Handles the TfLite ARG_MAX operator by delegating to the shared ArgMin/ArgMax path.
void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
{
    ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
}
3171
3172void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
3173{
Inki Daed4619e22020-09-10 15:33:54 +09003174 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3175 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3176 CHECK_VALID_SIZE(inputs.size(), 2);
3177
3178 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3179 CHECK_VALID_SIZE(outputs.size(), 1);
3180
Matthew Sloyan28f177c2021-04-09 14:38:52 +01003181 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3182 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
Inki Daed4619e22020-09-10 15:33:54 +09003183 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01003184 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
Matthew Sloyan28f177c2021-04-09 14:38:52 +01003185
3186 // Check if output tensor type is Signed32 or Signed64
Mike Kelly1f140f72021-04-06 12:25:55 +01003187 if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
3188 outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
3189 {
3190 throw ParseException(
3191 fmt::format(
3192 "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
3193 CHECK_LOCATION().AsString()));
3194 }
Matthew Sloyan28f177c2021-04-09 14:38:52 +01003195
3196 // Get const axis value from model and set it to descriptor.
3197 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3198 if (axisBufferPtr == nullptr)
3199 {
3200 throw ParseException(
3201 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
3202 CHECK_LOCATION().AsString()));
3203 }
3204
3205 std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
3206 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
3207 int32_t axis = axisData.front();
3208
3209 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
3210 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
3211 {
3212 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
3213 // E.g. Rank 4 tensor can have axis in range [-4, 3)
3214 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
3215 throw ParseException(
3216 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
3217 axis,
3218 CHECK_LOCATION().AsString()));
3219 }
3220
3221 ArgMinMaxDescriptor desc;
3222 desc.m_Axis = axis;
3223 desc.m_Function = argMinMaxFunction;
3224
3225 // Register a ArgMin/ArgMax layer.
3226 auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
3227 auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
3228 IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
3229 ARMNN_ASSERT(layer != nullptr);
Inki Daed4619e22020-09-10 15:33:54 +09003230 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3231
3232 // Register input tensor to the layer.
3233 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3234 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3235
3236 // Register output tensor to the layer.
3237 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3238 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3239}
3240
Kevin May7d96b162021-02-03 17:38:41 +00003241void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00003242{
3243 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3244
Kevin May7d96b162021-02-03 17:38:41 +00003245 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003246 CHECK_VALID_SIZE(inputs.size(), 2);
Kevin May7d96b162021-02-03 17:38:41 +00003247 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003248 CHECK_VALID_SIZE(outputs.size(), 1);
3249
3250 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3251 armnn::TensorInfo indicesTensorInfo = ToTensorInfo(inputs[1]);
3252 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3253
3254 armnn::GatherDescriptor gatherDescriptor;
3255
3256 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3257 const auto * options = operatorPtr->builtin_options.AsGatherOptions();
3258 auto axis = options->axis;
3259
3260 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
3261 auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
3262 auto outputDimensions = outputTensorInfo.GetNumDimensions();
3263 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
3264 {
3265 throw ParseException(
3266 fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
3267 axis,
3268 inputDimensions, inputDimensions,
3269 CHECK_LOCATION().AsString()));
3270 }
3271 if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
3272 {
3273 throw ParseException(
3274 fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
3275 outputDimensions,
3276 inputDimensions, indicesDimensions,
3277 CHECK_LOCATION().AsString()));
3278 }
3279
3280 gatherDescriptor.m_Axis = axis;
3281
3282 auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);
3283 IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
3284 ARMNN_ASSERT(layer != nullptr);
3285 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3286
3287 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3288 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
3289
3290 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3291 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3292}
3293
Kevin May7d96b162021-02-03 17:38:41 +00003294void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00003295{
3296 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3297
Kevin May7d96b162021-02-03 17:38:41 +00003298 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003299 CHECK_VALID_SIZE(inputs.size(), 1);
Kevin May7d96b162021-02-03 17:38:41 +00003300 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003301 CHECK_VALID_SIZE(outputs.size(), 1);
3302
3303 armnn::DepthToSpaceDescriptor descriptor;
3304
3305 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3306 const auto * options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
3307 auto blockSize = options->block_size;
3308 if (blockSize < 2)
3309 {
3310 throw ParseException(
3311 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
3312 blockSize,
3313 CHECK_LOCATION().AsString()));
3314 }
3315 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
3316
3317 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
3318 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
3319 ARMNN_ASSERT(layer != nullptr);
3320 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3321 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3322
3323 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3324 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3325
3326 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3327 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3328}
3329
// Handles the TfLite SUM operator via the shared Reduce path.
void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
}

// Handles the TfLite REDUCE_MAX operator via the shared Reduce path.
void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
}

// Handles the TfLite REDUCE_MIN operator via the shared Reduce path.
void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
}
3344
// Common handler for the reduction operators (SUM, REDUCE_MAX, REDUCE_MIN).
// Input 0 is the tensor to reduce, input 1 the axes to reduce over; when the
// axes tensor is not a constant (no backing buffer), every dimension is reduced.
void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto *options = operatorPtr->builtin_options.AsReducerOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo0 = ToTensorInfo(inputs[0]);
    armnn::TensorInfo inputTensorInfo1 = ToTensorInfo(inputs[1]);

    ReduceDescriptor desc;
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    // Get const axis value from model and set it to descriptor.
    if (axisBufferPtr != nullptr)
    {
        std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
        ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());

        // Convert the axis to unsigned int and remove duplicates.
        // (i + rank) % rank maps negative axes onto their positive equivalents;
        // the std::set orders them and drops repeats.
        auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
        std::set<unsigned int> uniqueAxis;
        std::transform(axisData.begin(),
                       axisData.end(),
                       std::inserter(uniqueAxis, uniqueAxis.begin()),
                       [rank](int i)->unsigned int{
                           return static_cast<uint32_t>(((i + rank) % rank)); });
        desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
    }
    else
    {
        // No constant axis tensor available: reduce over every dimension.
        for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
        {
            desc.m_vAxis.push_back(i);
        }
    }

    desc.m_KeepDims = options->keep_dims;
    desc.m_ReduceOperation = reduceOperation;

    // Register a new Reduce layer configured for the requested reduction operation.
    IConnectableLayer *layer = m_Network->AddReduceLayer(desc, layerName.c_str());

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
3406
// Handles the TfLite ABS operator via the shared elementwise-unary path.
void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
}

// Handles the TfLite EXP operator via the shared elementwise-unary path.
void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
}

// Handles the TfLite LOGICAL_NOT operator via the shared elementwise-unary path.
void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
}

// Handles the TfLite NEG operator via the shared elementwise-unary path.
void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
}

// Handles the TfLite RSQRT operator via the shared elementwise-unary path.
void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
}
3431
3432void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
3433{
3434 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3435
3436 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3437 CHECK_VALID_SIZE(inputs.size(), 1);
3438
3439 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3440 CHECK_VALID_SIZE(outputs.size(), 1);
3441
3442 std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
3443 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
3444
3445 ElementwiseUnaryDescriptor desc;
3446 desc.m_Operation = unaryOperation;
3447 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
3448 ARMNN_ASSERT(layer != nullptr);
3449
3450 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3451 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3452
3453 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3454 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3455
3456 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3457 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3458}
3459
// Handles the TfLite EQUAL operator via the shared comparison path.
void TfLiteParserImpl::ParseEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Equal);
}

// Handles the TfLite NOT_EQUAL operator via the shared comparison path.
void TfLiteParserImpl::ParseNotEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::NotEqual);
}

// Handles the TfLite GREATER operator via the shared comparison path.
void TfLiteParserImpl::ParseGreater(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Greater);
}

// Handles the TfLite GREATER_EQUAL operator via the shared comparison path.
void TfLiteParserImpl::ParseGreaterOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::GreaterOrEqual);
}

// Handles the TfLite LESS operator via the shared comparison path.
void TfLiteParserImpl::ParseLess(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::Less);
}

// Handles the TfLite LESS_EQUAL operator via the shared comparison path.
void TfLiteParserImpl::ParseLessOrEqual(size_t subgraphIndex, size_t operatorIndex)
{
    ParseComparison(subgraphIndex, operatorIndex, armnn::ComparisonOperation::LessOrEqual);
}
3489
3490void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorIndex,
3491 ComparisonOperation comparisonOperation)
3492{
3493 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3494
3495 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3496 CHECK_VALID_SIZE(inputs.size(), 2);
3497
3498 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3499 CHECK_VALID_SIZE(outputs.size(), 1);
3500
3501 auto layerName = std::string(GetComparisonOperationAsCString(comparisonOperation)) + ":{}:{}";
3502 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
3503
3504 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
3505 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
3506 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerNameFormatted, "Input 0", "Input 1");
3507
3508 ComparisonDescriptor desc;
3509 desc.m_Operation = comparisonOperation;
3510 IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());
3511 ARMNN_ASSERT(layer != nullptr);
3512
3513 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3514 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3515
3516 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3517 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
3518
3519 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3520 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3521}
3522
Kevin May7d96b162021-02-03 17:38:41 +00003523armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
3524 unsigned int outputSlot,
3525 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01003526{
3527 ActivationDescriptor activationDesc;
3528 std::string layerName = prevLayer->GetName();
3529
3530 switch(activationType)
3531 {
3532 case tflite::ActivationFunctionType_NONE:
3533 {
3534 // this is a no-op: return previous layer
3535 return prevLayer;
3536 }
3537 case tflite::ActivationFunctionType_RELU:
3538 {
3539 activationDesc.m_Function = ActivationFunction::ReLu;
3540 layerName += ":RELU";
3541 break;
3542 }
3543 case tflite::ActivationFunctionType_RELU6:
3544 {
3545 activationDesc.m_Function = ActivationFunction::BoundedReLu;
3546 activationDesc.m_A = 6.0f;
3547 activationDesc.m_B = 0.0f;
3548 layerName += ":RELU6";
3549 break;
3550 }
3551 case tflite::ActivationFunctionType_TANH:
3552 {
3553 activationDesc.m_Function = ActivationFunction::TanH;
3554 activationDesc.m_A = 1.0f;
3555 activationDesc.m_B = 1.0f;
3556 layerName += ":TANH";
3557 break;
3558 }
3559
3560 // I only put these here as a reminder what others we could support
3561 case tflite::ActivationFunctionType_RELU_N1_TO_1:
3562 case tflite::ActivationFunctionType_SIGN_BIT:
3563 default:
3564 {
3565 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003566 fmt::format("TfLite parser doesn't suppport fused activation: "
3567 "{}/{} {} ",
3568 activationType,
3569 tflite::EnumNameActivationFunctionType(activationType),
3570 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003571
3572 }
3573 }
3574
3575 IConnectableLayer* activationLayer =
3576 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
3577
3578 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
3579 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
3580 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
3581 return activationLayer;
3582}
3583
Kevin May7d96b162021-02-03 17:38:41 +00003584TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char * fileName)
telsoa01c577f2c2018-08-31 09:22:23 +01003585{
3586 if (fileName == nullptr)
3587 {
James Ward58dec6b2020-09-11 17:32:44 +01003588 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003589 CHECK_LOCATION().AsString()));
3590 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01003591 std::error_code errorCode;
3592 fs::path pathToFile(fileName);
3593 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01003594 {
James Ward58dec6b2020-09-11 17:32:44 +01003595 //fmt::format() could not be used here (format error)
3596 std::stringstream msg;
3597 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
3598 << " " << CHECK_LOCATION().AsString();
3599
3600 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01003601 }
3602 std::ifstream file(fileName, std::ios::binary);
3603 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3604 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3605 fileContent.size());
3606}
3607
Kevin May7d96b162021-02-03 17:38:41 +00003608TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
telsoa01c577f2c2018-08-31 09:22:23 +01003609{
3610 if (binaryContent == nullptr)
3611 {
James Ward58dec6b2020-09-11 17:32:44 +01003612 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003613 CHECK_LOCATION().AsString()));
3614 }
3615 flatbuffers::Verifier verifier(binaryContent, len);
3616 if (verifier.VerifyBuffer<tflite::Model>() == false)
3617 {
3618 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003619 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
3620 "flatbuffers format. size:{} {}",
3621 len,
3622 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003623 }
3624 return tflite::UnPackModel(binaryContent);
3625}
3626
Kevin May7d96b162021-02-03 17:38:41 +00003627TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr & model,
3628 size_t subgraphIndex,
3629 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003630{
3631 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3632
Derek Lambertiff05cc52019-04-26 13:05:17 +01003633 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3634 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003635
3636 size_t inputCount = operatorPtr->inputs.size();
mathad01c21025d2021-04-26 10:09:37 +01003637 TensorRawPtrVector result;
telsoa01c577f2c2018-08-31 09:22:23 +01003638 for (size_t i=0; i<inputCount; ++i)
3639 {
mathad01c21025d2021-04-26 10:09:37 +01003640 // If the input location is -1 then assume input is turned off.
3641 if (operatorPtr->inputs[i] == -1)
3642 {
3643 continue;
3644 }
3645 else
3646 {
3647 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
3648 result.push_back(subgraphPtr->tensors[inputId].get());
3649 }
telsoa01c577f2c2018-08-31 09:22:23 +01003650 }
3651 return result;
3652}
3653
Kevin May7d96b162021-02-03 17:38:41 +00003654TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr & model,
3655 size_t subgraphIndex,
3656 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003657{
3658 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3659
Derek Lambertiff05cc52019-04-26 13:05:17 +01003660 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3661 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003662
3663 size_t outputCount = operatorPtr->outputs.size();
3664 TensorRawPtrVector result(outputCount);
3665 for (size_t i=0; i<outputCount; ++i)
3666 {
3667 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
3668 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003669 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003670 }
3671 return result;
3672}
3673
Kevin May7d96b162021-02-03 17:38:41 +00003674TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr & model,
3675 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003676{
3677 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003678 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003679
Derek Lambertiff05cc52019-04-26 13:05:17 +01003680 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003681 TensorIdRawPtrVector result(inputCount);
3682 for (size_t i=0; i<inputCount; ++i)
3683 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003684 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01003685 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003686 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003687 }
3688 return result;
3689}
3690
Kevin May7d96b162021-02-03 17:38:41 +00003691TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr & model,
3692 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003693{
3694 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003695 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003696
Derek Lambertiff05cc52019-04-26 13:05:17 +01003697 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003698 TensorIdRawPtrVector result(outputCount);
3699 for (size_t i=0; i<outputCount; ++i)
3700 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003701 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3702 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003703 }
3704 return result;
3705}
3706
Kevin May7d96b162021-02-03 17:38:41 +00003707std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
3708 size_t subgraphIndex,
3709 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003710{
3711 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003712 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3713 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003714 return operatorPtr->inputs;
3715}
3716
Kevin May7d96b162021-02-03 17:38:41 +00003717std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
3718 size_t subgraphIndex,
3719 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003720{
3721 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003722 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3723 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003724 return operatorPtr->outputs;
3725}
3726
Kevin May7d96b162021-02-03 17:38:41 +00003727void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
3728 size_t operatorIndex,
3729 IConnectableLayer* layer,
Finn Williamsd4fa5452021-03-01 12:31:41 +00003730 const std::vector<unsigned int>& tensorIndexes,
3731 unsigned int startingSlotIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003732{
3733 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003734 ARMNN_ASSERT(layer != nullptr);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003735 if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
telsoa01c577f2c2018-08-31 09:22:23 +01003736 {
3737 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003738 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
3739 " for subgraph:{} operator index:{} {}",
3740 tensorIndexes.size(),
3741 layer->GetNumInputSlots(),
3742 subgraphIndex,
3743 operatorIndex,
3744 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003745 }
3746
Finn Williamsd4fa5452021-03-01 12:31:41 +00003747 for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
telsoa01c577f2c2018-08-31 09:22:23 +01003748 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00003749 unsigned int tensorIndex = tensorIndexes[index];
3750 armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
telsoa01c577f2c2018-08-31 09:22:23 +01003751 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
3752 }
3753}
3754
Kevin May7d96b162021-02-03 17:38:41 +00003755void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
3756 size_t operatorIndex,
3757 IConnectableLayer* layer,
3758 const std::vector<unsigned int>& tensorIndexes)
telsoa01c577f2c2018-08-31 09:22:23 +01003759{
3760 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003761 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003762 if (tensorIndexes.size() != layer->GetNumOutputSlots())
3763 {
3764 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003765 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
3766 " for subgraph:{} operator index:{} {}",
3767 tensorIndexes.size(),
3768 layer->GetNumOutputSlots(),
3769 subgraphIndex,
3770 operatorIndex,
3771 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003772 }
3773
3774 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
3775 {
3776 unsigned int tensorIndex = tensorIndexes[slotIndex];
3777 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
3778 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3779 }
3780}
3781
Kevin May7d96b162021-02-03 17:38:41 +00003782void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003783{
3784 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3785
3786 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
3787 for (auto const & tensorIdAndPtr : inputs)
3788 {
3789 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3790 IConnectableLayer* layer =
3791 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3792
3793 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
3794 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3795
3796 RegisterOutputSlots(subgraphIndex,
3797 VIRTUAL_OPERATOR_ID,
3798 layer,
3799 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3800 }
3801}
3802
Kevin May7d96b162021-02-03 17:38:41 +00003803void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003804{
3805 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3806
3807 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
3808 for (auto const & tensorIdAndPtr : outputs)
3809 {
3810 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3811 IConnectableLayer* layer =
3812 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3813
3814 RegisterInputSlots(subgraphIndex,
3815 VIRTUAL_OPERATOR_ID,
3816 layer,
3817 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3818 }
3819}
3820
Kevin May7d96b162021-02-03 17:38:41 +00003821void TfLiteParserImpl::SetupConstantLayers(size_t subgraphIndex)
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003822{
3823 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3824
Derek Lambertiff05cc52019-04-26 13:05:17 +01003825 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003826 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
3827 {
3828 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
3829 {
3830 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
3831 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
3832 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003833 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003834 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003835 auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003836
James Ward58dec6b2020-09-11 17:32:44 +01003837 std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003838 IConnectableLayer *layer =
Finn Williamsd4fa5452021-03-01 12:31:41 +00003839 m_Network->AddConstantLayer(tensorAndData, layerName.c_str());
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003840
3841 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3842 RegisterOutputSlots(subgraphIndex,
3843 VIRTUAL_OPERATOR_ID,
3844 layer,
3845 { tensorIndex });
3846
3847 }
3848 }
3849 }
3850}
3851
telsoa01c577f2c2018-08-31 09:22:23 +01003852// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Kevin May7d96b162021-02-03 17:38:41 +00003853TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003854{
3855 CHECK_BUFFER(model, bufferIndex);
3856 return model->buffers[bufferIndex].get();
3857}
3858
Matteo Martincigh747ef822018-12-18 09:26:39 +00003859template<typename T>
Kevin May7d96b162021-02-03 17:38:41 +00003860std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
3861TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
3862 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00003863 armnn::TensorInfo& tensorInfo,
3864 armnn::Optional<armnn::PermutationVector&> permutationVector)
3865{
3866 auto constData = CreateConstTensorImpl<T>(bufferPtr,
3867 tensorPtr,
3868 tensorInfo,
3869 permutationVector);
Kevin May7d96b162021-02-03 17:38:41 +00003870 TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
Matteo Martincigh747ef822018-12-18 09:26:39 +00003871 return std::make_pair(constData.first, std::move(storage));
3872}
3873
Finn Williamsd4fa5452021-03-01 12:31:41 +00003874bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
3875{
3876 CHECK_TENSOR_PTR(tensorPtr);
mathad01bf7edb62021-04-20 16:12:45 +01003877 bool isConst = true;
3878
3879 auto buffer = GetBuffer(m_Model, tensorPtr->buffer);
3880 if (buffer->data.size() == 0)
3881 {
3882 isConst = false;
3883 }
3884
3885 return isConst;
Finn Williamsd4fa5452021-03-01 12:31:41 +00003886}
3887
3888
Kevin May7d96b162021-02-03 17:38:41 +00003889std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
Finn Williamsd4fa5452021-03-01 12:31:41 +00003890TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
3891 armnn::TensorInfo& tensorInfo,
3892 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01003893{
3894 CHECK_TENSOR_PTR(tensorPtr);
3895 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3896 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3897
3898 switch (tensorInfo.GetDataType())
3899 {
3900 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003901 return CreateConstTensorAndStoreData<float>(bufferPtr,
3902 tensorPtr,
3903 tensorInfo,
3904 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00003905 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003906 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
3907 tensorPtr,
3908 tensorInfo,
3909 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00003910 case armnn::DataType::QSymmS8:
3911 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3912 tensorPtr,
3913 tensorInfo,
3914 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00003915 case armnn::DataType::QAsymmS8:
3916 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3917 tensorPtr,
3918 tensorInfo,
3919 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003920 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003921 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
3922 tensorPtr,
3923 tensorInfo,
3924 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003925 default:
3926 {
3927 std::stringstream errString;
3928 errString << "Unexpected datatype when creating const tensor: "
3929 << armnn::GetDataTypeName(tensorInfo.GetDataType())
3930 << " shape:" << tensorInfo.GetShape()
3931 << CHECK_LOCATION().AsString();
3932 throw ParseException(errString.str());
3933 }
3934 }
3935}
3936
Finn Williamsd4fa5452021-03-01 12:31:41 +00003937armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
3938 armnn::TensorInfo& tensorInfo)
3939{
3940 CHECK_TENSOR_PTR(tensorPtr);
3941 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3942 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3943
3944 return ConstTensor(tensorInfo, bufferPtr->data.data());
3945}
3946
Kevin May7d96b162021-02-03 17:38:41 +00003947BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
3948 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01003949{
3950 CHECK_SUBGRAPH(m_Model, subgraphId);
3951 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3952 for (auto const & input : inputs)
3953 {
3954 if (input.second->name == name)
3955 {
3956 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3957 return std::make_pair(bindingId, ToTensorInfo(input.second));
3958 }
3959 }
3960
3961 std::stringstream bindings;
3962 for (auto const & input : inputs)
3963 {
3964 bindings << "'" << input.second->name << "' ";
3965 }
3966
3967 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003968 fmt::format("No input binding found for subgraph:{} and name:{}. "
3969 "Possible inputs are: [{}] {}",
3970 subgraphId,
3971 name,
3972 bindings.str(),
3973 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003974}
3975
Kevin May7d96b162021-02-03 17:38:41 +00003976BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
3977 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01003978{
3979 CHECK_SUBGRAPH(m_Model, subgraphId);
3980 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003981 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01003982 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003983 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01003984 if (output.second->name == name)
3985 {
3986 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003987 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3988 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3989 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01003990 }
3991 }
3992
3993 std::stringstream bindings;
3994 for (auto const & output : outputs)
3995 {
3996 bindings << "'" << output.second->name << "' ";
3997 }
3998
3999 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01004000 fmt::format("No output binding found for subgraph:{} and name:{}. "
4001 "Possible outputs are: [{}] {}",
4002 subgraphId,
4003 name,
4004 bindings.str(),
4005 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01004006}
4007
Kevin May7d96b162021-02-03 17:38:41 +00004008size_t TfLiteParserImpl::GetSubgraphCount() const
telsoa01c577f2c2018-08-31 09:22:23 +01004009{
4010 return m_Model->subgraphs.size();
4011}
4012
Kevin May7d96b162021-02-03 17:38:41 +00004013std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01004014{
4015 CHECK_SUBGRAPH(m_Model, subgraphId);
4016 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
4017 std::vector<std::string> result;
4018 result.reserve(inputs.size());
4019 for (auto const & input : inputs)
4020 {
4021 result.push_back(input.second->name);
4022 }
4023 return result;
4024}
4025
Kevin May7d96b162021-02-03 17:38:41 +00004026std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01004027{
4028 CHECK_SUBGRAPH(m_Model, subgraphId);
4029 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
4030 std::vector<std::string> result;
4031 result.reserve(outputs.size());
4032 for (auto const & output : outputs)
4033 {
4034 result.push_back(output.second->name);
4035 }
4036 return result;
4037}
4038
// Returns the parser's version string, generated at build time into
// armnnTfLiteParser/Version.hpp as TFLITE_PARSER_VERSION.
const std::string TfLiteParserImpl::GetVersion()
{
    return TFLITE_PARSER_VERSION;
}
4043
// Takes ownership of a float buffer; all other storage slots stay empty.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
4051
// Takes ownership of a uint8_t buffer; all other storage slots stay empty.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
4059
// Takes ownership of an int8_t buffer; all other storage slots stay empty.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}
4067
// Takes ownership of an int32_t buffer; all other storage slots stay empty.
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
4075
4076} // armnnTfLiteParser