//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TfLiteParser.hpp"

#include "armnnTfLiteParser/Version.hpp"

#include <armnn/BackendOptions.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/Tensor.hpp>
#include <armnnUtils/TensorUtils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

// armnnUtils:
#include <armnnUtils/Permute.hpp>
#include <Filesystem.hpp>

#include <ParserHelper.hpp>
#include <VerificationHelpers.hpp>

// The generated code based on the Tf Lite schema:
#include <schema_generated.h>

#include <flatbuffers/flexbuffers.h>

#include <fmt/format.h>

#include <algorithm>
#include <fstream>
#include <iostream>
#include <limits>
#include <numeric>
#include <sstream>

#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
          { \
            throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
               << ": " \
               << CHECK_LOCATION().AsString()).str()); \
          }

using namespace armnn;
using armnn::CheckLocation;
namespace armnnTfLiteParser
{

ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
    pTfLiteParserImpl(new TfLiteParserImpl(options)) {}

ITfLiteParser::~ITfLiteParser() = default;

ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
{
    return new ITfLiteParser(options);
}

ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
{
    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
}

void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}

armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
}

armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
{
    return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
}

BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
}

BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                            const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
}

size_t ITfLiteParser::GetSubgraphCount() const
{
    return pTfLiteParserImpl->GetSubgraphCount();
}

std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
}

std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
}

namespace
{

const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();

void CheckSubgraph(const TfLiteParserImpl::ModelPtr& model,
                   size_t subgraphIndex,
                   const CheckLocation& location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            fmt::format("{} was called with invalid (null) model. "
                        "Possible reason is that the model is not yet loaded and Unpack(ed). "
                        "subgraph:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid subgraph index. "
                        "subgraph:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        location.FileLine()));
    }
}

#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
    CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())

void CheckModel(const TfLiteParserImpl::ModelPtr& model,
                size_t subgraphIndex,
                size_t operatorIndex,
                const CheckLocation& location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            fmt::format("{} was called with invalid (null) model. "
                        "Possible reason is that the model is not yet loaded and Unpack(ed). "
                        "subgraph:{} operator:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        operatorIndex,
                        location.FileLine()));
    }
    else if (subgraphIndex >= model->subgraphs.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid subgraph index. "
                        "subgraph:{} operator:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        operatorIndex,
                        location.FileLine()));
    }
    else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
             operatorIndex != VIRTUAL_OPERATOR_ID)
    {
        throw ParseException(
            fmt::format("{} was called with an invalid operator index. "
                        "subgraph:{} operator:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        operatorIndex,
                        location.FileLine()));
    }
}

#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())

void CheckTensor(const TfLiteParserImpl::ModelPtr& model,
                 size_t subgraphIndex,
                 size_t tensorIndex,
                 const CheckLocation& location)
{
    // Not checking the model here, because CHECK_MODEL is assumed to have
    // already run and validated it. An assert is sufficient.
    ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");

    // The subgraph index should likewise have been checked by CHECK_MODEL,
    // so only an assert is added here.
    ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");

    // The tensor index is the only thing left to check here.
    if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid tensor index. "
                        "subgraph:{} tensor:{} at {}",
                        location.m_Function,
                        subgraphIndex,
                        tensorIndex,
                        location.FileLine()));
    }
}

#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
    CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())

void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
                    const CheckLocation& location)
{
    if (rawPtr == nullptr)
    {
        throw ParseException(
            fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
    }
}

#define CHECK_TENSOR_PTR(TENSOR_PTR) \
    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())

void CheckBuffer(const TfLiteParserImpl::ModelPtr& model,
                 size_t bufferIndex,
                 const CheckLocation& location)
{
    if (model.get() == nullptr)
    {
        throw ParseException(
            fmt::format("{} was called with invalid (null) model. "
                        "Possible reason is that the model is not yet loaded and Unpack(ed). "
                        "buffer:{} at {}",
                        location.m_Function,
                        bufferIndex,
                        location.FileLine()));
    }
    else if (bufferIndex >= model->buffers.size())
    {
        throw ParseException(
            fmt::format("{} was called with an invalid buffer index. "
                        "buffer index:{} at {}",
                        location.m_Function,
                        bufferIndex,
                        location.FileLine()));
    }
    else if (model->buffers[bufferIndex].get() == nullptr)
    {
        throw ParseException(
            fmt::format("The buffer #{} is null. {}",
                        bufferIndex,
                        location.AsString()));
    }
}

#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
    CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())

void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
                     const armnn::TensorInfo& tensorInfo,
                     uint32_t bufferId,
                     const CheckLocation& location)
{
    if (bufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("BufferPtr is null for buffer:{}. {}",
                        bufferId,
                        location.AsString()));
    }
    else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
            tensorInfo.GetNumBytes() > bufferPtr->data.size())
    {
        std::stringstream ss;
        ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
           << "For tensor: " << tensorInfo.GetShape()
           << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
           << tensorInfo.GetNumElements() << " elements. " << location.AsString();
        throw ParseException(ss.str());
    }
}

#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
    CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())

bool IsActivationSupported(tflite::ActivationFunctionType activationType)
{
    switch(activationType)
    {
        case tflite::ActivationFunctionType_NONE:
        case tflite::ActivationFunctionType_RELU:
        case tflite::ActivationFunctionType_RELU6:
        case tflite::ActivationFunctionType_TANH:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}

#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                fmt::format("TfLite parser doesn't support fused activation: " \
                            "{}/{} in {} subgraph:{} operator:{} at {}", \
                            OPTION->fused_activation_function, \
                            tflite::EnumNameActivationFunctionType(\
                                OPTION->fused_activation_function), \
                            __func__, \
                            SUBGRAPH_INDEX, \
                            OPERATOR_INDEX, \
                            CHECK_LOCATION().FileLine())); \
        } \
    } while(false)


std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t>& in)
{
    std::vector<unsigned int> result;
    result.reserve(in.size());
    for (auto& i : in)
    {
        result.push_back(CHECKED_NON_NEGATIVE(i));
    }
    return result;
}

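// Computes the front/back padding implied by TfLite's SAME padding scheme:
// outputSize = ceil(inputSize / stride), and the total padding is the amount by which
// (outputSize - 1) * stride + dilatedFilterSize overhangs the input.
// VALID padding leaves both values at zero.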
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 tflite::Padding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == tflite::Padding_SAME)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}

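// Builds an armnn::TensorInfo from a TfLite tensor: maps the TfLite element type to an
// ArmNN DataType, substitutes a {1} shape for scalar tensors (marking output tensors as
// dynamic), and copies per-tensor or per-axis quantization parameters when present.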
armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                               const std::vector<unsigned int>& shapes,
                               const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3},
                               const bool outputTensor = false)
{
    armnn::DataType type;
    CHECK_TENSOR_PTR(tensorPtr);

    switch (tensorPtr->type)
    {
        case tflite::TensorType_UINT8:
            type = armnn::DataType::QAsymmU8;
            break;
        case tflite::TensorType_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case tflite::TensorType_INT8:
            if (tensorPtr->quantization->zero_point.size() == 1)
            {
                // Per-tensor
                type = armnn::DataType::QAsymmS8;
            }
            else
            {
                // Per-channel
                type = armnn::DataType::QSymmS8;
            }
            break;
        case tflite::TensorType_INT16:
            type = armnn::DataType::QSymmS16;
            break;
        case tflite::TensorType_INT32:
            type = armnn::DataType::Signed32;
            break;
        case tflite::TensorType_INT64:
            type = armnn::DataType::Signed64;
            break;
        default:
        {
            CheckLocation location = CHECK_LOCATION();
            throw ParseException(
                fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
                            tensorPtr->type,
                            tflite::EnumNameTensorType(tensorPtr->type),
                            tensorPtr->name,
                            location.AsString()));
        }
    }
    std::vector<unsigned int> safeShape = shapes;
    bool isDynamic = false;
    if (safeShape.size() == 0)
    {
        safeShape.push_back(1);
        if (outputTensor)
        {
            isDynamic = true;
        }
    }

    float quantizationScale = 0.0f;
    int32_t quantizationOffset = 0;

    if (tensorPtr->quantization.get())
    {
        if (tensorPtr->quantization->scale.size() <= 1)
        {
            CHECK_VALID_SIZE(tensorPtr->quantization->scale.size(), 0, 1);
            CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);

            if (tensorPtr->quantization->scale.size() == 1)
            {
                quantizationScale = tensorPtr->quantization->scale[0];
            }
            if (tensorPtr->quantization->zero_point.size() == 1)
            {
                // NOTE: we lose precision here when converting from 64 bit to 32
                //       but this is what we support at the moment in ArmNN
                quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
            }

            TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                    safeShape.data());
            if (isDynamic)
            {
                tensorShape = TensorShape(1, false);
            }
            armnn::TensorInfo result(tensorShape,
                                     type,
                                     quantizationScale,
                                     quantizationOffset);
            return result;
        }
        else
        {
            std::vector<float> quantizationScales;
            std::vector<int32_t> quantizationOffsets;

            // Scale
            std::copy(tensorPtr->quantization->scale.begin(),
                      tensorPtr->quantization->scale.end(),
                      std::back_inserter(quantizationScales));

            // QSymmS8 Per-axis
            TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                    safeShape.data());
            if (isDynamic)
            {
                tensorShape = TensorShape(1, false);
            }
            armnn::TensorInfo result(tensorShape,
                                     type,
                                     quantizationScales,
                                     dimensionMappings[armnn::numeric_cast<unsigned int>(
                                         tensorPtr->quantization->quantized_dimension)]);
            return result;
        }
    }
    else
    {
        TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                safeShape.data());
        if (isDynamic)
        {
            tensorShape = TensorShape(1, false);
        }
        armnn::TensorInfo result(tensorShape,
                                 type,
                                 quantizationScale,
                                 quantizationOffset);
        return result;
    }
}

armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                               const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
{
    auto const& dimensions = AsUnsignedVector(tensorPtr->shape);
    return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
}

armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                               const bool outputTensor)
{
    auto const& dimensions = AsUnsignedVector(tensorPtr->shape);
    const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3};
    return ToTensorInfo(tensorPtr, dimensions, dimensionMappings, outputTensor);
}

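// Copies (and, when a permutation vector is supplied, permutes) the raw bytes behind a
// constant TfLite tensor into newly allocated storage and wraps them in an armnn::ConstTensor.
// The returned unique_ptr owns the data and must outlive the ConstTensor.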
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
                      TfLiteParserImpl::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    IgnoreUnused(tensorPtr);
    ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    ARMNN_ASSERT_MSG(bufferPtr != nullptr,
                     fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}

armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
{
    // Generate the binding id by shifting the tensor id left by 8 bits
    // and adding the subgraph id, which allows for 256 subgraphs.
    return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
}

bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
{
    const unsigned int actualSize = actual.GetNumDimensions();
    if (actualSize != expected.size())
    {
        return false;
    }

    for (unsigned int i = 0u; i < actualSize; i++)
    {
        if (expected[i] < 0 ||
            actual[i] != static_cast<unsigned int>(expected[i]))
        {
            return false;
        }
    }

    return true;
}

void CheckMatchingQuantization(const TensorInfo& first,
                               const TensorInfo& second,
                               const std::string& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (!first.IsQuantized() ||
        !second.IsQuantized())
    {
        // Not a quantized type, ignore the validation
        return;
    }

    DataType firstDataType = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
                                       secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
                                       " and scale " + std::to_string(second.GetQuantizationScale()));
    }
}

} // <anonymous>

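// Every slot of m_ParserFunctions starts out pointing at ParseUnsupportedOperator;
// the constructor then overwrites the entries for the builtin operators that are implemented.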
TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParserImpl::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParserImpl::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParserImpl::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParserImpl::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParserImpl::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParserImpl::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE]          = &TfLiteParserImpl::ParseDepthToSpace;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParserImpl::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParserImpl::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_ELU]                     = &TfLiteParserImpl::ParseElu;
    m_ParserFunctions[tflite::BuiltinOperator_EXP]                     = &TfLiteParserImpl::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParserImpl::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER]                  = &TfLiteParserImpl::ParseGather;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH]              = &TfLiteParserImpl::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParserImpl::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParserImpl::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParserImpl::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParserImpl::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParserImpl::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParserImpl::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParserImpl::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParserImpl::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParserImpl::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParserImpl::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParserImpl::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParserImpl::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParserImpl::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX]              = &TfLiteParserImpl::ParseReduceMax;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN]              = &TfLiteParserImpl::ParseReduceMin;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParserImpl::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParserImpl::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParserImpl::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParserImpl::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParserImpl::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParserImpl::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V]                 = &TfLiteParserImpl::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParserImpl::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParserImpl::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParserImpl::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_SUM]                     = &TfLiteParserImpl::ParseSum;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParserImpl::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParserImpl::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParserImpl::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParserImpl::ParseUnpack;
    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParserImpl::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX]                 = &TfLiteParserImpl::ParseArgMax;
    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"]            = &TfLiteParserImpl::ParseDetectionPostProcess;
}

void TfLiteParserImpl::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Model = nullptr;
    m_SubgraphConnections.clear();
}

INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}

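// Walks every operator of the (single supported) subgraph and dispatches it to the registered
// parser function, sets up the input, output and constant layers, and finally connects each
// producing output slot to all of the input slots registered against the same tensor.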
INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
{

    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
    if (m_Options && m_Options.value().m_InferAndValidate)
    {
        BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                  {
                                                      { "InferAndValidate", true }
                                                  });

        networkOptions.push_back(shapeInferenceMethodOption);
    }

    m_Network = INetwork::Create(networkOptions);
    ARMNN_ASSERT(m_Model.get() != nullptr);

    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
                        m_Model->subgraphs.size(),
                        CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    size_t operatorIndex = 0;
    try
    {
        for (SubgraphPtr const& subgraph : m_Model->subgraphs)
        {
            m_SubgraphConnections.emplace_back(subgraph->tensors.size());
            for (OperatorPtr const& op : subgraph->operators)
            {
                auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
                                                     "subgraph:{} operator idx:{}. {}",
                                                     builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
                                                     operatorIndex, CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function
                auto& parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
                ++operatorIndex;
            }

            SetupInputLayers(subgraphIndex);
            SetupOutputLayers(subgraphIndex);
            SetupConstantLayers(subgraphIndex);

            ++subgraphIndex;
            operatorIndex = 0;
        }
    }
    catch (const ParseException& e)
    {
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                     inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                     ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}

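// Records the output slot that produces a given tensor. A tensor may only have one producer,
// so a second registration for the same tensor raises a ParseException.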
void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
                                                size_t tensorIndex,
                                                armnn::IOutputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];

    // assuming there is only one producer for that tensor
    if (tensorSlots.outputSlot != nullptr)
    {
        throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
                                         "subgraph:{} tensor:{} {}",
                                         subgraphIndex,
                                         tensorIndex,
                                         CHECK_LOCATION().AsString()));
    }

    tensorSlots.outputSlot = slot;
}

void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
                                                size_t tensorIndex,
                                                armnn::IInputSlot* slot)
{
    CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
    ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
    ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);

    TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
    tensorSlots.inputSlots.push_back(slot);
}

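// Custom operators are dispatched by their custom_code string; anything without a registered
// handler falls back to ParseUnsupportedOperator.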
void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    // NOTE: By default we presume the custom operator is not supported
    auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;

    // Identify custom code defined for custom operator
    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto& customCode  = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;

    // Find parser function that corresponds to custom code (if any)
    auto iterator = m_CustomParserFunctions.find(customCode);
    if (iterator != m_CustomParserFunctions.end())
    {
        customParserFunction = iterator->second;
    }

    // Run parser function
    (this->*customParserFunction)(subgraphIndex, operatorIndex);
}

void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto opcodeIndex = operatorPtr->opcode_index;
    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;

    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
    {
        // Do not add StandInLayer, throw ParseException instead
        throw ParseException(
            fmt::format("Operator not supported. "
                        "subgraph:{} operator:{} "
                        "opcode_index:{} opcode:{} / {} {}",
                        subgraphIndex,
                        operatorIndex,
                        opcodeIndex,
                        opcode,
                        tflite::EnumNameBuiltinOperator(opcode),
                        CHECK_LOCATION().AsString()));
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);

    const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
    const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());

    StandInDescriptor descriptor(numInputs, numOutputs);
    auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);

    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    for (unsigned int i = 0u; i < numOutputs; ++i)
    {
        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
    }

    auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));

    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}

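// CONV_2D: inputs are [input, filter] or [input, filter, bias]. Activations are NHWC and the
// filter is OHWI, so height and width sit at dimensions 1 and 2 of both tensors when the
// SAME/VALID padding is worked out.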
void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

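// DEPTHWISE_CONV_2D: TfLite stores the weights as [1, H, W, I * M]. The filter TensorInfo is
// reshaped to [H, W, I, M] and then permuted to ArmNN's expected [M, I, H, W] layout when the
// constant weight tensor is created.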
void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto* options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1], permutationVector);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);

    IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}

void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Exp:{}:{}", subgraphIndex, operatorIndex);

    ElementwiseUnaryDescriptor desc;
    desc.m_Operation = UnaryOperation::Exp;
    IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}

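// TRANSPOSE: the optional second input holds the permutation vector; when it is present it is
// read from the tensor's buffer and used to build the TransposeDescriptor.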
void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 1, 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
    TransposeDescriptor desc;

    if (inputs.size() == 2)
    {
        armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
        BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
        auto numPermVecElements = permuteTensorInfo.GetNumElements();
        std::vector<unsigned int> permuteShape(numPermVecElements);
        ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
        PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());

        desc = TransposeDescriptor(permutationVector);
    }

    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}

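// TRANSPOSE_CONV: inputs are [output_shape, filter, input], with an optional bias as a fourth
// input. When the output_shape tensor is available it is copied into the descriptor so the
// output dimensions do not need to be inferred.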
Kevin May7d96b162021-02-03 17:38:41 +00001139void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001140{
1141 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1142
1143 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1144 const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();
1145
1146 TransposeConvolution2dDescriptor desc;
1147 desc.m_BiasEnabled = false;
1148 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1149 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1150 desc.m_DataLayout = armnn::DataLayout::NHWC;
1151
1152 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
David Monahan61683802021-01-12 09:11:07 +00001153 if (inputs.size() == 4)
1154 {
1155 desc.m_BiasEnabled = true;
1156 }
1157 else
1158 {
1159 CHECK_VALID_SIZE(inputs.size(), 3);
1160 }
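// Note on TfLite tensor ordering for TRANSPOSE_CONV: inputs[0] holds the output shape,
// inputs[1] the weights, inputs[2] the activation input and inputs[3] the optional bias,
// which is why the shape is read from inputs[0] below and the input tensor from inputs[2].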
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001161
1162 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1163 CHECK_VALID_SIZE(outputs.size(), 1);
1164
Colm Donelan0ad3ef12020-07-03 15:54:28 +01001165 if (inputs[0])
1166 {
1167 armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
1168 std::vector<int> output_shape(tensorInfo.GetNumElements());
1169 if (tensorInfo.GetDataType() == DataType::Signed32)
1170 {
1171 ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
1172 }
1173 if (tensorInfo.GetDataType() == DataType::QAsymmU8)
1174 {
1175 for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
1176 {
1177 output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
1178 }
1179 }
1180 // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
1181 for (int dimension : output_shape)
1182 {
1183 desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
1184 }
1185 desc.m_OutputShapeEnabled = true;
1186 }
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001187 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001188 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1189
1190 // TfLite uses NHWC tensors
1191 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1192 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1193
1194 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1195 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1196
1197 CalcPadding(inputHeight,
1198 filterHeight,
1199 desc.m_StrideY,
1200 1, // DilationY
1201 desc.m_PadTop,
1202 desc.m_PadBottom,
1203 options->padding);
1204
1205 CalcPadding(inputWidth,
1206 filterWidth,
1207 desc.m_StrideX,
1208 1, // DilationX
1209 desc.m_PadLeft,
1210 desc.m_PadRight,
1211 options->padding);
1212
1213 auto filterTensorAndData = CreateConstTensor(inputs[1],
1214 filterTensorInfo,
1215 armnn::Optional<armnn::PermutationVector&>());
1216
1217 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01001218 auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001219
David Monahan61683802021-01-12 09:11:07 +00001220 if (desc.m_BiasEnabled)
1221 {
1222 auto biasTensorInfo = ToTensorInfo(inputs[3]);
1223 auto biasConstTensor = CreateConstTensor(inputs[3],
1224 biasTensorInfo,
1225 armnn::Optional<armnn::PermutationVector&>());
1226 layer = m_Network->AddTransposeConvolution2dLayer(desc,
1227 filterTensorAndData.first,
1228 biasConstTensor.first,
1229 layerName.c_str());
1230 }
1231 else
1232 {
1233 layer = m_Network->AddTransposeConvolution2dLayer(desc,
1234 filterTensorAndData.first,
1235 EmptyOptional(),
1236 layerName.c_str());
1237 }
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001238
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001239 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001240
Sadik Armagand109a4d2020-07-28 10:42:13 +01001241 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001242 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1243
1244 // only the tensors for the inputs are relevant, exclude the const (filter) tensor
1245 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001246 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001247
1248 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1249 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1250}
1251
Kevin May7d96b162021-02-03 17:38:41 +00001252void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001253{
1254 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
1255}
1256
Kevin May7d96b162021-02-03 17:38:41 +00001257void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001258{
1259 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1260
1261 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1262 CHECK_VALID_SIZE(inputs.size(), 3);
1263
1264 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1265 CHECK_VALID_SIZE(outputs.size(), 1);
1266
1267 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1268 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1269
1270 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1271 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1272
1273 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1274 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1275
1276 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1277 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1278
1279 size_t step = 2;
1280 std::vector<std::pair<unsigned int, unsigned int>> crops;
1281 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1282 {
1283 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1284 }
1285
1286 armnn::BatchToSpaceNdDescriptor desc;
1287 desc.m_BlockShape = blockShape;
1288 desc.m_Crops = crops;
1289 desc.m_DataLayout = armnn::DataLayout::NHWC;
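// For illustration: blockShape = { 2, 2 } with zero crops turns a [4, 1, 1, 1] input into a
// [1, 2, 2, 1] output: the batch is divided by the block-shape product and each spatial
// dimension is multiplied by its block value before the crops are removed.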
1290
James Ward58dec6b2020-09-11 17:32:44 +01001291 auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001292
James Conroy05102392020-06-24 15:39:55 +01001293 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001294 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001295 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1296
1297 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1298 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001299 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1300
1301 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1302 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1303
1304 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1305 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1306}
1307
Kevin May7d96b162021-02-03 17:38:41 +00001308void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001309{
1310 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1311
1312 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1313 CHECK_VALID_SIZE(inputs.size(), 1);
1314
1315 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1316 CHECK_VALID_SIZE(outputs.size(), 1);
1317
1318 L2NormalizationDescriptor desc;
1319 desc.m_DataLayout = armnn::DataLayout::NHWC;
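// For reference (TfLite semantics, stated as an assumption rather than read from the model):
// L2_NORMALIZATION scales along the innermost (channel) dimension, roughly
// out[..., c] = in[..., c] / sqrt(sum_k in[..., k]^2), typically with a small epsilon in the
// denominator to avoid division by zero.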
James Ward58dec6b2020-09-11 17:32:44 +01001320 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001321 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1322
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001323 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001324
Sadik Armagand109a4d2020-07-28 10:42:13 +01001325 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson28c94572019-07-18 10:47:03 +01001326 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1327
1328 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1329 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1330
1331 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1332 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1333}
1334
Kevin May7d96b162021-02-03 17:38:41 +00001335void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001336{
1337 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
1338}
1339
Kevin May7d96b162021-02-03 17:38:41 +00001340void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001341{
1342 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1343
1344 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1345 CHECK_VALID_SIZE(inputs.size(), 2);
1346
1347 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1348 CHECK_VALID_SIZE(outputs.size(), 1);
1349
James Ward58dec6b2020-09-11 17:32:44 +01001350 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001351
1352 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1353 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1354 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001355
Sadik Armagand109a4d2020-07-28 10:42:13 +01001356 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001357 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1358
1359 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1360 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001361 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1362
1363 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001364 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001365
1366 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1367 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1368}
1369
Kevin May7d96b162021-02-03 17:38:41 +00001370void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001371{
1372 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1373
1374 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1375 CHECK_VALID_SIZE(inputs.size(), 2);
1376
1377 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1378 CHECK_VALID_SIZE(outputs.size(), 1);
1379
James Ward58dec6b2020-09-11 17:32:44 +01001380 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001381
1382 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1383 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1384 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001385
Sadik Armagand109a4d2020-07-28 10:42:13 +01001386 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001387 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1388
1389 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1390 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001391 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1392
1393 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001394 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001395
1396 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1397 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1398}
1399
Kevin May7d96b162021-02-03 17:38:41 +00001400void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
1401 size_t operatorIndex,
1402 PoolingAlgorithm algorithm)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001403{
1404 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1405
1406 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1407 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1408
1409 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1410
1411 std::string layerName;
1412
1413 switch (algorithm)
1414 {
1415 case PoolingAlgorithm::Average:
1416 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001417 fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001418 break;
1419 case PoolingAlgorithm::Max:
1420 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001421 fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001422 break;
1423 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001424 ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001425 }
1426
1427 Pooling2dDescriptor desc;
1428
1429 desc.m_PoolType = algorithm;
1430 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1431 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1432 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1433 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1434 desc.m_PaddingMethod = PaddingMethod::Exclude;
1435 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001436 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001437
1438 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1439 CHECK_VALID_SIZE(inputs.size(), 1);
1440 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1441
1442 // assuming input is NHWC
1443 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1444 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1445
Pablo Tellof0bd6832019-04-26 17:58:13 +01001446 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1447 desc.m_PadTop, desc.m_PadBottom, options->padding);
1448 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1449 desc.m_PadLeft, desc.m_PadRight, options->padding);
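// Padding sketch (the usual TensorFlow convention, stated as an assumption rather than a
// restatement of CalcPadding): for SAME padding, outputSize = ceil(inputSize / stride) and
// totalPad = max((outputSize - 1) * stride + poolSize - inputSize, 0), split between the
// low and high sides; for VALID padding all four pad fields stay 0.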
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001450
1451 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1452 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001453
Sadik Armagand109a4d2020-07-28 10:42:13 +01001454 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001455 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1456
1457 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1458 ARMNN_ASSERT(layer != nullptr);
jimfly01c25411c2018-11-14 17:47:22 +00001459 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001460
1461 // register the input connection slots for the layer, connections are made after all layers have been created
1462 // only the tensors for the inputs are relevant, exclude the const tensors
1463 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001464 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001465
jimfly01c25411c2018-11-14 17:47:22 +00001466 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
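// If a fused activation (e.g. RELU) was requested, AddFusedActivationLayer returns that
// activation layer, so the output slots below are registered against it rather than the
// pooling layer itself.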
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001467 // register the output connection slots for the layer, connections are made after all layers have been created
1468 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1469 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1470}
1471
Kevin May7d96b162021-02-03 17:38:41 +00001472void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
josh minorba424d22019-11-13 10:55:17 -06001473{
1474 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1475
1476 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1477 CHECK_VALID_SIZE(inputs.size(), 3);
1478 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1479 CHECK_VALID_SIZE(outputs.size(), 1);
1480
1481 SliceDescriptor desc;
1482
1483 // set begin tensor info for slice descriptor
1484 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1485 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1486
1487 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1488 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1489
1490 // set size tensor info for slice descriptor
1491 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1492 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1493
1494 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1495 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1496 desc = SliceDescriptor(begin, size);
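// For illustration: on an input of shape [3, 2, 3], begin = { 1, 0, 0 } and size = { 1, 2, 3 }
// select a single [1, 2, 3] slab starting at row 1 of the outermost dimension.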
1497
James Ward58dec6b2020-09-11 17:32:44 +01001498 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06001499
James Conroy05102392020-06-24 15:39:55 +01001500 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001501 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001502 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1503
1504 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
josh minorba424d22019-11-13 10:55:17 -06001505 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1506
1507 // register the input connection slots for the layer, connections are made after all layers have been created
1508 // only the tensors for the inputs are relevant, exclude the const tensors
1509 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1510 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1511
1512 // register the output connection slots for the layer, connections are made after all layers have been created
1513 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1514 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1515}
1516
Kevin May7d96b162021-02-03 17:38:41 +00001517void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001518{
1519 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1520 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1521 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1522
1523 SoftmaxDescriptor desc;
1524 desc.m_Beta = options->beta;
1525
1526 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1527 CHECK_VALID_SIZE(inputs.size(), 1);
1528 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1529 CHECK_VALID_SIZE(outputs.size(), 1);
1530
James Ward58dec6b2020-09-11 17:32:44 +01001531 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001532 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1533
Sadik Armagand109a4d2020-07-28 10:42:13 +01001534 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
telsoa01c577f2c2018-08-31 09:22:23 +01001535 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1536
1537 // register the input connection slots for the layer, connections are made after all layers have been created
1538 // only the tensors for the inputs are relevant, exclude the const tensors
1539 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1540 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1541
1542 // register the output connection slots for the layer, connections are made after all layers have been created
1543 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1544 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1545}
1546
Kevin May7d96b162021-02-03 17:38:41 +00001547void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001548{
1549 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1550
1551 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1552 CHECK_VALID_SIZE(inputs.size(), 3);
1553
1554 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1555 CHECK_VALID_SIZE(outputs.size(), 1);
1556
1557 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1558 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1559
1560 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1561 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1562
1563 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1564 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1565
1566 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1567 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1568
1569 size_t step = 2;
1570 std::vector<std::pair<unsigned int, unsigned int>> padList;
1571 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1572 {
1573 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1574 }
1575
1576 armnn::SpaceToBatchNdDescriptor desc;
1577 desc.m_BlockShape = blockShape;
1578 desc.m_PadList = padList;
1579 desc.m_DataLayout = armnn::DataLayout::NHWC;
1580
James Ward58dec6b2020-09-11 17:32:44 +01001581 auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001582
James Conroy05102392020-06-24 15:39:55 +01001583 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001584 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001585 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1586
1587 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1588 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001589 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1590
1591 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1592 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1593
1594 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1595 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1596}
1597
Kevin May7d96b162021-02-03 17:38:41 +00001598armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1599 const armnn::TensorInfo & inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01001600{
1601 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1602 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1603 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1604
1605 if (inputTensorInfo.GetNumDimensions() > 4)
1606 {
1607 std::stringstream ss;
1608 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1609 << " shape:" << inputTensorInfo.GetShape() << " "
1610 << CHECK_LOCATION().AsString();
1611 throw ParseException(ss.str());
1612 }
1613
1614 if (squeezeDims.empty())
1615 {
1616 squeezeDims.assign(dimensionSequence,
1617 dimensionSequence+inputTensorInfo.GetNumDimensions());
1618 }
1619
1620 std::vector<uint32_t> outputDims;
1621 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1622 {
1623 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1624 auto currentDimension = inputTensorInfo.GetShape()[i];
1625 if (skipSqueeze || currentDimension != 1)
1626 {
1627 outputDims.push_back(currentDimension);
1628 }
1629 }
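// Worked example: an input of shape [1, 2, 1, 3] with squeezeDims = { 0, 2 }, or with an empty
// squeezeDims (which defaults to every dimension), gives outputDims = { 2, 3 };
// squeezeDims = { 2 } alone would give { 1, 2, 3 }.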
1630
1631 if (outputDims.size() > 4)
1632 {
1633 std::stringstream ss;
1634 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1635 << " shape:" << inputTensorInfo.GetShape() << " "
1636 << CHECK_LOCATION().AsString();
1637 throw ParseException(ss.str());
1638 }
1639
1640 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1641 outputDims.data());
1642
1643 // we need to preserve the tensor type and the quantization data as well
1644 TensorInfo outTensorInfo = inputTensorInfo;
1645 outTensorInfo.SetShape(outShape);
1646
1647 return outTensorInfo;
1648}
1649
Kevin May7d96b162021-02-03 17:38:41 +00001650void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001651{
1652 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1653
1654 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1655 CHECK_VALID_SIZE(inputs.size(), 1);
1656
1657 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1658 CHECK_VALID_SIZE(outputs.size(), 1);
1659
1660 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1661 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01001662 auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001663
1664 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1665 armnn::TensorInfo outputTensorInfo =
Kevin May7d96b162021-02-03 17:38:41 +00001666 TfLiteParserImpl::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
telsoa01c577f2c2018-08-31 09:22:23 +01001667 inputTensorInfo);
James Conroy05102392020-06-24 15:39:55 +01001668 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
telsoa01c577f2c2018-08-31 09:22:23 +01001669
1670 ReshapeDescriptor reshapeDesc;
1671 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1672
telsoa01c577f2c2018-08-31 09:22:23 +01001673 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001674 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001675 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1676
1677 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1678 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1679
1680 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1681 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1682}
1683
Kevin May7d96b162021-02-03 17:38:41 +00001684void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001685{
1686 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1687
1688 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1689 CHECK_VALID_SIZE(inputs.size(), 4);
1690
1691 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1692 CHECK_VALID_SIZE(outputs.size(), 1);
1693
1694 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1695 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1696
1697 StridedSliceDescriptor desc;
1698 desc.m_BeginMask = options->begin_mask;
1699 desc.m_EllipsisMask = options->ellipsis_mask;
1700 desc.m_EndMask = options->end_mask;
1701 desc.m_NewAxisMask = options->new_axis_mask;
1702 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1703 desc.m_DataLayout = armnn::DataLayout::NHWC;
1704
1705 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1706 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1707
1708 std::vector<int> begin(beginTensorInfo.GetNumElements());
1709 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1710
1711 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1712 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1713
1714 std::vector<int> end(endTensorInfo.GetNumElements());
1715 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1716
1717 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1718 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1719
1720 std::vector<int> stride(strideTensorInfo.GetNumElements());
1721 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1722
1723 desc.m_Begin = begin;
1724 desc.m_End = end;
1725 desc.m_Stride = stride;
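// For illustration: on a [3, 4] input, begin = { 1, 0 }, end = { 3, 4 }, stride = { 1, 2 } with
// all masks zero selects rows 1..2 and every second column, giving a [2, 2] output. A set bit i
// in begin_mask or end_mask asks the backend to ignore begin[i] or end[i] for that dimension.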
1726
James Ward58dec6b2020-09-11 17:32:44 +01001727 auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001728 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001729 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001730
Sadik Armagand109a4d2020-07-28 10:42:13 +01001731 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001732 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1733
1734 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1735 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1736
1737 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1738 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1739}
1740
Kevin May7d96b162021-02-03 17:38:41 +00001741void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001742{
1743 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1744
1745 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1746 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1747
1748 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1749 CHECK_VALID_SIZE(inputs.size(), 2);
1750
1751 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1752 CHECK_VALID_SIZE(outputs.size(), 1);
1753
1754 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1755 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1756
James Ward58dec6b2020-09-11 17:32:44 +01001757 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001758 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001759 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001760
Sadik Armagand109a4d2020-07-28 10:42:13 +01001761 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001762 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1763
1764 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001765 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001766
1767 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1768
1769 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1770 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1771}
1772
Kevin May7d96b162021-02-03 17:38:41 +00001773void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301774{
1775 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1776
1777 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1778 const auto * options = operatorPtr->builtin_options.AsDivOptions();
1779
1780 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1781 CHECK_VALID_SIZE(inputs.size(), 2);
1782
1783 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1784 CHECK_VALID_SIZE(outputs.size(), 1);
1785
1786 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1787 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1788
James Ward58dec6b2020-09-11 17:32:44 +01001789 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301790 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001791 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301792
Sadik Armagand109a4d2020-07-28 10:42:13 +01001793 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301794 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1795
1796 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001797 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301798 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1799
1800 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1801 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1802}
1803
Kevin May7d96b162021-02-03 17:38:41 +00001804void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001805{
1806 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1807
1808 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1809 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1810
1811 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1812 CHECK_VALID_SIZE(inputs.size(), 2);
1813
1814 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1815 CHECK_VALID_SIZE(outputs.size(), 1);
1816
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001817 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1818 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1819
James Ward58dec6b2020-09-11 17:32:44 +01001820 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001821 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001822 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001823
Sadik Armagand109a4d2020-07-28 10:42:13 +01001824 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001825 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1826
1827 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001828 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001829 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1830
1831 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1832 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1833}
1834
Kevin May7d96b162021-02-03 17:38:41 +00001835void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001836{
1837 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1838
1839 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1840 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1841
1842 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1843 CHECK_VALID_SIZE(inputs.size(), 2);
1844
1845 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1846 CHECK_VALID_SIZE(outputs.size(), 1);
1847
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001848 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1849 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1850
James Ward58dec6b2020-09-11 17:32:44 +01001851 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001852 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001853 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001854
Sadik Armagand109a4d2020-07-28 10:42:13 +01001855 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001856 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1857
1858 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001859 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001860 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1861
1862 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1863 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1864}
1865
Kevin May7d96b162021-02-03 17:38:41 +00001866void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001867{
1868 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1869
1870 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1871
1872 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1873 CHECK_VALID_SIZE(outputs.size(), 1);
1874
1875 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1876 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1877
1878 armnn::MeanDescriptor desc;
1879 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1880 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1881 desc.m_Axis = axis;
1882
1883 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001884 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001885
1886 desc.m_KeepDims = (inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions());
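// Rather than reading a keep-dims option, the parser infers it from the ranks: for example a
// [1, 10, 10, 3] input reduced over axes { 1, 2 } implies keep-dims when the output is
// [1, 1, 1, 3] (ranks match) and no keep-dims when it is [1, 3].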
1889
James Ward58dec6b2020-09-11 17:32:44 +01001890 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001891 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001892 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001893
1894 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1895
1896 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1897 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1898
1899 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1900 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1901}
1902
Kevin May7d96b162021-02-03 17:38:41 +00001903void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel83fcf982020-05-26 22:22:42 +05301904{
1905 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1906
1907 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1908 CHECK_VALID_SIZE(inputs.size(), 1);
1909
1910 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1911 CHECK_VALID_SIZE(outputs.size(), 1);
1912
James Ward58dec6b2020-09-11 17:32:44 +01001913 auto layerName = fmt::format("Neg:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel83fcf982020-05-26 22:22:42 +05301914 armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Neg);
1915 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
1916 ARMNN_ASSERT(layer != nullptr);
1917
Sadik Armagand109a4d2020-07-28 10:42:13 +01001918 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel83fcf982020-05-26 22:22:42 +05301919 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1920
1921 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1922 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1923
1924 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1925 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1926}
1927
Kevin May7d96b162021-02-03 17:38:41 +00001928void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001929{
1930 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1931
Kevin May7d96b162021-02-03 17:38:41 +00001932 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001933
Kevin May7d96b162021-02-03 17:38:41 +00001934 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001935 CHECK_VALID_SIZE(outputs.size(), 1);
1936
Narumol Prangnawarat8719d222020-11-27 16:57:56 +00001937 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1938
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001939 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1940 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1941
1942 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1943 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1944
1945 size_t step = 2;
1946 armnn::PadDescriptor desc;
Narumol Prangnawarat8719d222020-11-27 16:57:56 +00001947 if (inputTensorInfo.IsQuantized())
1948 {
1949 desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
1950 }
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001951 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1952 {
1953 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1954 }
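// For illustration: a pad tensor of { {0,0}, {1,1}, {2,2}, {0,0} } on an NHWC input adds one row
// above and below and two columns left and right. For quantized inputs the pad value set above
// is the zero point, so the padded region dequantizes to 0.0f.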
1955
James Ward58dec6b2020-09-11 17:32:44 +01001956 auto layerName = fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001957 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001958
1959 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1960 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001961 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1962
1963 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1964 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1965
1966 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1967 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1968}
1969
Kevin May7d96b162021-02-03 17:38:41 +00001970void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00001971{
1972 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1973
1974 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1975 CHECK_VALID_SIZE(inputs.size(), 1);
1976
1977 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1978 CHECK_VALID_SIZE(outputs.size(), 1);
1979
James Ward58dec6b2020-09-11 17:32:44 +01001980 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001981
1982 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001983 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001984
Sadik Armagand109a4d2020-07-28 10:42:13 +01001985 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001986 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1987
1988 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1989 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1990
1991 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1992 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1993}
Finn Williamsc42c3842019-01-22 14:18:11 +00001994
Kevin May7d96b162021-02-03 17:38:41 +00001995void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01001996{
Finn Williamsc42c3842019-01-22 14:18:11 +00001997 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01001998}
1999
Kevin May7d96b162021-02-03 17:38:41 +00002000void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01002001{
Finn Williamsc42c3842019-01-22 14:18:11 +00002002 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::BoundedReLu);
2003}
Sadik Armagan58f39192018-09-17 14:14:39 +01002004
Kevin May7d96b162021-02-03 17:38:41 +00002005void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan12239e72020-05-27 11:06:17 +01002006{
Jan Eilers2f746b32020-07-28 14:00:06 +01002007 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
Sadik Armagan12239e72020-05-27 11:06:17 +01002008}
2009
Kevin May7d96b162021-02-03 17:38:41 +00002010void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsc42c3842019-01-22 14:18:11 +00002011{
2012 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Sigmoid);
2013}
2014
Kevin May7d96b162021-02-03 17:38:41 +00002015void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd99851762019-04-09 09:37:38 +01002016{
2017 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::TanH);
2018}
2019
Kevin May7d96b162021-02-03 17:38:41 +00002020void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
Matthew Sloyan7515d072020-12-16 12:50:01 +00002021{
2022 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
2023}
2024
Kevin May7d96b162021-02-03 17:38:41 +00002025void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
Jan Eilers2f746b32020-07-28 14:00:06 +01002026{
2027 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
2028}
Finn Williamsc42c3842019-01-22 14:18:11 +00002029
Kevin May7d96b162021-02-03 17:38:41 +00002030void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
Finn Williamsc42c3842019-01-22 14:18:11 +00002031{
2032 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01002033 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Jan Eilers8eb25602020-03-09 12:13:48 +00002034 IgnoreUnused(operatorPtr);
Sadik Armagan58f39192018-09-17 14:14:39 +01002035
2036 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2037 CHECK_VALID_SIZE(inputs.size(), 1);
2038
2039 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2040 CHECK_VALID_SIZE(outputs.size(), 1);
2041
James Ward58dec6b2020-09-11 17:32:44 +01002042 auto layerName = fmt::format("Activation:");
Sadik Armagan58f39192018-09-17 14:14:39 +01002043 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00002044 activationDesc.m_Function = activationType;
2045
2046 switch (activationType)
2047 {
2048 case ActivationFunction::ReLu:
2049 {
James Ward58dec6b2020-09-11 17:32:44 +01002050 layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00002051 break;
2052 }
2053 case ActivationFunction::BoundedReLu:
2054 {
James Ward58dec6b2020-09-11 17:32:44 +01002055 layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00002056 activationDesc.m_A = 6.0f;
2057 activationDesc.m_B = 0.0f;
2058 break;
2059 }
2060 case ActivationFunction::Sigmoid:
2061 {
James Ward58dec6b2020-09-11 17:32:44 +01002062 layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00002063 break;
2064 }
Nina Drozd99851762019-04-09 09:37:38 +01002065 case ActivationFunction::TanH:
2066 {
James Ward58dec6b2020-09-11 17:32:44 +01002067 layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd99851762019-04-09 09:37:38 +01002068 activationDesc.m_A = 1.0f;
2069 activationDesc.m_B = 1.0f;
2070 break;
2071 }
Sadik Armagan12239e72020-05-27 11:06:17 +01002072 case ActivationFunction::LeakyReLu:
2073 {
James Ward58dec6b2020-09-11 17:32:44 +01002074 layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan12239e72020-05-27 11:06:17 +01002075 const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
2076 activationDesc.m_A = options->alpha;
2077 break;
2078 }
Matthew Sloyan7515d072020-12-16 12:50:01 +00002079 case ActivationFunction::Elu:
2080 {
2081 layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
2082 activationDesc.m_A = 1.0f;
2083 break;
2084 }
Jan Eilers2f746b32020-07-28 14:00:06 +01002085 case ActivationFunction::HardSwish:
Matthew Sloyan7515d072020-12-16 12:50:01 +00002086 {
James Ward58dec6b2020-09-11 17:32:44 +01002087 layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
Jan Eilers2f746b32020-07-28 14:00:06 +01002088 break;
Matthew Sloyan7515d072020-12-16 12:50:01 +00002089 }
Finn Williamsc42c3842019-01-22 14:18:11 +00002090 default:
2091 {
2092 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002093 fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
2094 static_cast<int>(activationType), CHECK_LOCATION().AsString()));
Finn Williamsc42c3842019-01-22 14:18:11 +00002095 }
2096 }
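// For reference (Arm NN activation parameter conventions, stated as an assumption): BoundedReLu
// clamps to the range [m_B, m_A], so m_A = 6, m_B = 0 reproduces RELU6; TanH computes
// m_A * tanh(m_B * x); LeakyReLu uses m_A as the negative-slope alpha.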
2097
2098 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01002099
Sadik Armagand109a4d2020-07-28 10:42:13 +01002100 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan58f39192018-09-17 14:14:39 +01002101 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2102
2103 // register the input connection slots for the layer, connections are made after all layers have been created
2104 // only the tensors for the inputs are relevant, exclude the const tensors
2105 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2106 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2107
2108 // register the output connection slots for the layer, connections are made after all layers have been created
2109 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2110 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2111}
Kevin May7d96b162021-02-03 17:38:41 +00002112armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
2113 const std::vector<int32_t> & targetDimsIn)
Sadikb94967b2018-09-19 15:30:00 +01002114{
2115 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2116 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2117
2118 if (stretchDim != targetDimsIn.end())
2119 {
2120 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2121 {
2122 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002123 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01002124 }
2125
2126 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002127 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01002128 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2129
2130 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2131 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2132 }
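// Worked example: a 24-element input with targetDimsIn = { -1, 6 } resolves the stretch
// dimension to 24 / 6 = 4, giving an output shape of [4, 6].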
2133
2134 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2135
2136 TensorInfo reshapeInfo = inputTensorInfo;
2137 reshapeInfo.SetShape(outputShape);
2138
2139 return reshapeInfo;
2140}
2141
Kevin May7d96b162021-02-03 17:38:41 +00002142void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
Sadikb94967b2018-09-19 15:30:00 +01002143{
2144 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2145
2146 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01002147
2148 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2149 CHECK_VALID_SIZE(outputs.size(), 1);
2150
2151 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2152 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01002153 auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01002154
2155 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00002156 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
James Conroy05102392020-06-24 15:39:55 +01002157 CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");
Derek Lambertic9e52792020-03-11 11:42:26 +00002158
Jan Eilersbac9b352020-07-13 13:40:24 +01002159 // Extracting new shape for the output
2160 // There are two ways it can be passed
2161 // * First is to define the target shape in the operator built-in options
2162 // * Second is to pass it as a second input tensor
Derek Lambertic9e52792020-03-11 11:42:26 +00002163 std::vector<int32_t> targetShape;
Jan Eilersbac9b352020-07-13 13:40:24 +01002164 bool targetShapeFound = false;
2165 // Check if built-in options were given
2166 if (options != nullptr)
Derek Lambertic9e52792020-03-11 11:42:26 +00002167 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002168 // make sure the parameter is given
2169 if (options->new_shape.empty() == false)
Derek Lambertic9e52792020-03-11 11:42:26 +00002170 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002171 targetShape = options->new_shape;
2172 targetShapeFound = true;
Derek Lambertif4a953f2020-03-17 14:25:57 +00002173 }
Derek Lambertic9e52792020-03-11 11:42:26 +00002174 }
Jan Eilersbac9b352020-07-13 13:40:24 +01002175
2176 // If there is no built-in option given or if the built-in new_shape parameter was empty
2177 if (!targetShapeFound)
Derek Lambertic9e52792020-03-11 11:42:26 +00002178 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002179 // Check for a second input tensor
2180 if (inputs.size() > 1 && inputs[1] != nullptr)
2181 {
2182 if (inputs[1]->is_variable)
2183 {
2184 ARMNN_THROW_PARSE_EXCEPTION("Target shapes defined in non-const input tensors are not supported");
2185 }
2186
2187 if (inputs[1]->shape.size() != 1)
2188 {
2189 ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
2190 }
2191
2192 if (inputs[1]->type != tflite::TensorType_INT32)
2193 {
2194 ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
2195 }
2196
2197 // Extract target shape from input
2198 auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2199 auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
Sadik Armagan19a1c032021-01-20 12:17:00 +00002200 if (!values)
2201 {
2202 ARMNN_THROW_PARSE_EXCEPTION("Reshape operator target shape input buffer data is null");
2203 }
Jan Eilersbac9b352020-07-13 13:40:24 +01002204 for (int i=0; i < inputs[1]->shape[0]; ++i)
2205 {
2206 targetShape.push_back(values[i]);
2207 }
2208 }
2209 else
Derek Lambertic9e52792020-03-11 11:42:26 +00002210 {
2211 ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
2212 "At least one method required");
2213 }
Derek Lambertic9e52792020-03-11 11:42:26 +00002214 }
2215
kevmay0171972a82018-12-17 14:28:03 +00002216 armnn::TensorInfo reshapeOutputTensorInfo =
Kevin May7d96b162021-02-03 17:38:41 +00002217 TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);
Sadikb94967b2018-09-19 15:30:00 +01002218
kevmay0171972a82018-12-17 14:28:03 +00002219 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00002220 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
2221 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00002222 {
2223 std::stringstream ss;
2224 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00002225 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00002226 << " does not equal output shape "
2227 << actualOutputTensorInfo.GetShape()
2228 << ": "
2229 << CHECK_LOCATION().AsString();
2230 throw ParseException(ss.str());
2231 }
2232
Sadikb94967b2018-09-19 15:30:00 +01002233 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00002234 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01002235
Sadikb94967b2018-09-19 15:30:00 +01002236 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002237 ARMNN_ASSERT(layer != nullptr);
kevmay0171972a82018-12-17 14:28:03 +00002238 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01002239
2240 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2241 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2242
2243 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2244 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2245}
2246
Kevin May7d96b162021-02-03 17:38:41 +00002247void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002248{
Sadik Armagana3b31f02019-12-05 09:08:53 +00002249 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
2250}
2251
Kevin May7d96b162021-02-03 17:38:41 +00002252void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagana3b31f02019-12-05 09:08:53 +00002253{
2254 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
2255}
2256
Kevin May7d96b162021-02-03 17:38:41 +00002257void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
Sadik Armagana3b31f02019-12-05 09:08:53 +00002258{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002259 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2260
2261 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2262 CHECK_VALID_SIZE(inputs.size(), 2);
2263
2264 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2265 CHECK_VALID_SIZE(outputs.size(), 1);
2266
2267 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
2268
2269 // Data for the parsed tensor args (size) must be stored locally.
2270 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2271
2272 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2273 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2274
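    // The size input tensor holds the target spatial dimensions as { new_height, new_width },
    // which map onto m_TargetHeight and m_TargetWidth below.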
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002275 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002276 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002277 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002278 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2279 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002280
James Ward58dec6b2020-09-11 17:32:44 +01002281 auto layerName = fmt::format("Resize:");
Sadik Armagana3b31f02019-12-05 09:08:53 +00002282
2283 switch (resizeMethod)
2284 {
2285 case ResizeMethod::Bilinear:
2286 {
James Ward58dec6b2020-09-11 17:32:44 +01002287 layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002288
2289 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2290 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2291
David Monahan4a0c9b92020-05-30 09:48:39 +01002292 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002293 break;
2294 }
2295 case ResizeMethod::NearestNeighbor:
2296 {
James Ward58dec6b2020-09-11 17:32:44 +01002297 layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagana3b31f02019-12-05 09:08:53 +00002298 break;
2299 }
2300 default:
2301 {
2302 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002303 fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
2304 static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
Sadik Armagana3b31f02019-12-05 09:08:53 +00002305 }
2306 }
2307
James Conroy05102392020-06-24 15:39:55 +01002308 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002309 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002310 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2311
2312 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
2313 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002314 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2315
2316 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2317 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2318
2319 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2320 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2321}
2322
Kevin May7d96b162021-02-03 17:38:41 +00002323void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan479045b2018-10-01 11:51:37 +01002324{
2325 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2326
2327 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2328 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
2329
2330 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2331
2332 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2333 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2334 CHECK_VALID_SIZE(outputs.size(), 1);
2335
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002336 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
2337 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01002338
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002339 const unsigned int concatDimInput = static_cast<unsigned int>(
2340 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
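    // Adding the rank before taking the modulus wraps a negative axis into the valid range,
    // e.g. axis = -1 with inputRank = 4 resolves to concatenation along dimension 3.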
Sadik Armagan479045b2018-10-01 11:51:37 +01002341
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002342 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2343 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01002344
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002345 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01002346
2347 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2348 {
2349 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
2350
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002351 // This sets up the concatDescriptor view origins
2352 armnnUtils::ProcessConcatInputTensorInfo(
2353 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01002354 }
2355
James Ward58dec6b2020-09-11 17:32:44 +01002356 auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002357 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002358
Jim Flynn906f9462019-05-10 13:55:21 +01002359 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002360 ARMNN_ASSERT(layer != nullptr);
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002361 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01002362
James Conroy05102392020-06-24 15:39:55 +01002363 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002364 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01002365
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002366 // add fused activation layer
2367 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01002368
Sadik Armagan479045b2018-10-01 11:51:37 +01002369 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2370 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2371}
2372
Kevin May7d96b162021-02-03 17:38:41 +00002373void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002374{
2375 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2376
2377 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2378 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2379
2380 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2381
2382 FullyConnectedDescriptor desc;
2383 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002384 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002385
2386 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2387 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2388 CHECK_VALID_SIZE(outputs.size(), 1);
2389
2390 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2391
2392 // Fully Connected Layer accepts two dimensional weights input
2393 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2394 if (weightsDimension != 2)
2395 {
2396 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002397 fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
2398 "Node {}",
2399 weightsDimension,
2400 CHECK_LOCATION().AsString()));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002401 }
2402
Matteo Martincigh747ef822018-12-18 09:26:39 +00002403 auto filterTensorAndData = CreateConstTensor(inputs[1],
2404 filterTensorInfo,
2405 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002406 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01002407 auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002408
2409 if (inputs.size() == 3)
2410 {
2411 desc.m_BiasEnabled = true;
2412 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002413 auto biasTensorAndData = CreateConstTensor(inputs[2],
2414 biasTensorInfo,
2415 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002416 layer = m_Network->AddFullyConnectedLayer(desc,
2417 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002418 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002419 layerName.c_str());
2420 }
2421 else
2422 {
2423 layer = m_Network->AddFullyConnectedLayer(desc,
2424 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002425 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002426 layerName.c_str());
2427 }
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002428 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002429
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002430 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2431
2432 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2433
2434 if (inputTensorInfo.GetNumDimensions() > 2)
2435 {
2436 // Add reshape to flatten to 2D [batch_size, input_size],
2437 // where "input_size" corresponds to the number of inputs to the layer,
2438 // matching the second dimension of weights,
2439 // and "batch_size" is calculated by dividing the number of elements by "input_size".
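        // e.g. an input of shape [1, 2, 3, 4] (24 elements) with weights of shape [outputs, 12]
        // is flattened to [2, 12] before being connected to the FullyConnected layer.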
2440 std::vector<unsigned int> reshapedDimensions(2);
2441 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2442 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2443
2444 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2445 {
2446 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002447 fmt::format("Failed to deduce input tensor shape from filter size {} {}",
2448 reshapedDimensions[1],
2449 CHECK_LOCATION().AsString()));
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002450 }
2451
2452 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2453 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2454
James Ward58dec6b2020-09-11 17:32:44 +01002455 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002456 armnn::ReshapeDescriptor reshapeDescriptor;
2457 reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
2458 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, reshapeLayerName.c_str());
2459
2460 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2461 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2462
2463 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2464 }
2465 else
2466 {
2467 // register the input connection slot for the layer
2468 // only the tensors for the inputs are relevant, exclude the const tensors
2469 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2470 }
2471
Sadik Armagand109a4d2020-07-28 10:42:13 +01002472 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002473 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2474
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002475 // we need to add the activation layer and fortunately we don't need to care about the data layout
2476 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2477 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002478
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002479 // register the output connection slots for the layer, connections are made after all layers have been created
2480 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2481 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2482}
2483
Kevin May7d96b162021-02-03 17:38:41 +00002484void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
keidav011b3e2ea2019-02-21 10:07:37 +00002485{
2486 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2487
2488 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2489
2490 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2491 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2492 CHECK_VALID_SIZE(outputs.size(), 4);
2493
2494 // Obtain custom options from flexbuffers
2495 auto custom_options = operatorPtr->custom_options;
2496 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
2497
2498 // Obtain descriptor information from tf lite
2499 DetectionPostProcessDescriptor desc;
2500 desc.m_MaxDetections = m["max_detections"].AsUInt32();
2501 desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
2502 desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
2503 desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
2504 desc.m_NumClasses = m["num_classes"].AsUInt32();
2505 desc.m_ScaleH = m["h_scale"].AsFloat();
2506 desc.m_ScaleW = m["w_scale"].AsFloat();
2507 desc.m_ScaleX = m["x_scale"].AsFloat();
2508 desc.m_ScaleY = m["y_scale"].AsFloat();
2509
keidav0107d58c72019-02-26 11:57:39 +00002510 if (!(m["use_regular_nms"].IsNull()))
keidav011b3e2ea2019-02-21 10:07:37 +00002511 {
keidav0107d58c72019-02-26 11:57:39 +00002512 desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
keidav011b3e2ea2019-02-21 10:07:37 +00002513 }
2514 if (!(m["detections_per_class"].IsNull()))
2515 {
2516 desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
2517 }
2518
2519 if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
2520 {
2521 throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
2522 "must be positive and less than or equal to 1.");
2523 }
2524
2525 armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
2526 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
2527 armnn::Optional<armnn::PermutationVector&>());
2528
James Ward58dec6b2020-09-11 17:32:44 +01002529 auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
keidav011b3e2ea2019-02-21 10:07:37 +00002530 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
2531 layerName.c_str());
2532
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002533 ARMNN_ASSERT(layer != nullptr);
keidav011b3e2ea2019-02-21 10:07:37 +00002534
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002535 // The model does not specify the output shapes.
2536 // The output shapes are calculated from max_detections and max_classes_per_detection.
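    // The four outputs are, in order: detection boxes [1, numDetectedBox, 4], detection classes
    // [1, numDetectedBox], detection scores [1, numDetectedBox] and the number of detections [1].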
2537 unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
2538 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2539 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2540 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2541 m_OverridenOutputShapes.push_back({ 1 });
2542
keidav011b3e2ea2019-02-21 10:07:37 +00002543 for (unsigned int i = 0 ; i < outputs.size() ; ++i)
2544 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00002545 armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
keidav011b3e2ea2019-02-21 10:07:37 +00002546 layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
2547 }
2548
2549 // Register the input connection slots for the layer, connections are made after all layers have been created
2550 // only the tensors for the inputs are relevant, exclude the const tensors
2551 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2552 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2553
2554 // Register the output connection slots for the layer, connections are made after all layers have been created
2555 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2556 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2557 outputTensorIndexes[1],
2558 outputTensorIndexes[2],
2559 outputTensorIndexes[3]});
2560}
2561
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002562/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00002563void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002564{
2565 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2566
2567 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2568 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2569 CHECK_VALID_SIZE(outputs.size(), 1);
2570
2571 if (inputs.size() < 1)
2572 {
2573 throw ParseException("Pack must have at least one input.");
2574 }
2575
2576 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2577 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2578
2579 StackDescriptor desc;
2580 desc.m_Axis = static_cast<uint32_t>(options->axis);
2581 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2582
2583 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2584 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2585 desc.m_InputShape = inputTensorInfo.GetShape();
2586
James Ward58dec6b2020-09-11 17:32:44 +01002587 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002588 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2589
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002590 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002591
Sadik Armagand109a4d2020-07-28 10:42:13 +01002592 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002593 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2594
2595 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2596 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2597
2598 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2599 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2600}
2601
Kevin May7d96b162021-02-03 17:38:41 +00002602void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01002603{
2604 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2605
2606 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2607 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2608
2609 // This unpackAxis indicates the axis to unpack
2610 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2611
2612 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2613 CHECK_VALID_SIZE(inputs.size(), 1);
2614
2615 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002616
2617 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2618 {
2619 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002620 fmt::format("The unpack axis: {} cannot be greater than or equal to "
2621 "the number of input dimension {} {}",
2622 unpackAxis,
2623 inputTensorInfo.GetNumDimensions(),
2624 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002625 }
2626
Nina Drozd200e3802019-04-15 09:47:39 +01002627 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2628 // If num is not defined, automatically infer from the length of the dimension axis.
2629 if(unpackNum == 0)
2630 {
2631 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2632 }
2633
2634 // If unpack number cannot be inferred and is still zero, throw ParseException.
2635 if(unpackNum == 0)
2636 {
2637 throw ParseException("Number to unpack must be greater than zero.");
2638 }
2639
2640 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2641 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2642
2643 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2644 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2645
2646 // Add current input shape to unpackDimSizes
2647 for (unsigned int i = 0; i < inputDimSize; ++i)
2648 {
2649 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2650 }
2651
2652 if (unpackDimSizes[unpackAxis] != unpackNum)
2653 {
2654 throw ParseException("Number to unpack must be the same as the length of the dimension to "
2655 "unpack along.");
2656 }
2657
2658 unpackDimSizes[unpackAxis] /= unpackNum;
2659
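    // Unpack is implemented as a Splitter along unpackAxis followed by a Reshape on each output
    // that drops the (now size 1) unpacked dimension, e.g. a [4, 2, 3] input unpacked along axis 0
    // is split into four [1, 2, 3] views which are then reshaped to [2, 3].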
2660 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2661 for (unsigned int j = 0; j < unpackNum; ++j)
2662 {
2663 // Set the size of the views.
2664 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2665 {
2666 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2667 }
2668 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2669 }
2670
James Ward58dec6b2020-09-11 17:32:44 +01002671 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01002672 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002673 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01002674
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002675 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2676 unpackDimSizes.data());
2677
Nina Drozd200e3802019-04-15 09:47:39 +01002678 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2679 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2680
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002681 // Create a Reshape layer to remove the unpacked dimension from each output of the Splitter.
2682 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2683 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002684 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01002685 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002686 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002687 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002688 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, reshapeLayerName.c_str());
2689
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002690 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2691 outputTensorInfo.GetDataType(),
2692 outputTensorInfo.GetQuantizationScale(),
2693 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002694 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2695
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002696 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002697
2698 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2699 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2700 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2701 }
Nina Drozd200e3802019-04-15 09:47:39 +01002702}
2703
Kevin May7d96b162021-02-03 17:38:41 +00002704void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd0324f482019-04-08 10:52:10 +01002705{
2706 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2707
2708 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2709 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2710
2711 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2712
Nina Drozd200e3802019-04-15 09:47:39 +01002713 // If number of splits cannot be inferred and is zero, throw ParseException.
2714 if(numSplits == 0)
2715 {
2716 throw ParseException("Number to splits must greater than zero.");
2717 }
2718
Nina Drozd0324f482019-04-08 10:52:10 +01002719 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2720 CHECK_VALID_SIZE(inputs.size(), 2);
2721 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2722 CHECK_VALID_SIZE(outputs.size(), numSplits);
2723
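    // For the TfLite Split operator the first input is the axis tensor and the second is the
    // tensor to be split, hence the reversed indices below.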
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002724 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2725 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002726
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002727 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2728 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2729 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2730
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002731 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002732 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002733
Nina Drozd0324f482019-04-08 10:52:10 +01002734 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002735 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002736 {
2737 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002738 fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
2739 inputTensorInfo.GetNumDimensions(),
2740 MaxNumOfTensorDimensions,
2741 CHECK_LOCATION().AsString()));
Nina Drozd0324f482019-04-08 10:52:10 +01002742 }
2743
2744 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2745
2746 // Add current input shape to splitterDimSizes
2747 for (unsigned int i = 0; i < inputDimSize; ++i)
2748 {
2749 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2750 }
2751
2752 if (splitterDimSizes[splitDim] % numSplits != 0)
2753 {
2754 throw ParseException("Number of splits must evenly divide the dimension");
2755 }
2756 splitterDimSizes[splitDim] /= numSplits;
2757
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002758 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002759 for (unsigned int j = 0; j < numSplits; ++j)
2760 {
2761 // Set the size of the views.
2762 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2763 {
2764 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2765 }
2766 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2767 }
2768
James Ward58dec6b2020-09-11 17:32:44 +01002769 auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd0324f482019-04-08 10:52:10 +01002770 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002771 ARMNN_ASSERT(layer != nullptr);
Nina Drozd0324f482019-04-08 10:52:10 +01002772
2773 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002774 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002775
Nina Drozd0324f482019-04-08 10:52:10 +01002776 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2777 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002778 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01002779 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01002780 }
2781
2782 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2783 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2784}
2785
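// Maps a possibly negative dimension index onto the range [0, numDimsIn),
// e.g. idx = -1 with numDimsIn = 4 yields 3.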
Derek Lambertif0176992020-04-28 13:37:49 +01002786unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
2787{
2788 int numDims = armnn::numeric_cast<int>(numDimsIn);
2789 int v = idx < 0 ? numDims + idx : idx;
2790 ARMNN_ASSERT(v >= 0);
2791 ARMNN_ASSERT(v < numDims);
2792
2793 return static_cast<unsigned int>(v);
2794}
2795
Kevin May7d96b162021-02-03 17:38:41 +00002796void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01002797{
2798 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2799
2800 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Ryan OShea86704732020-05-26 11:41:04 +01002801 const auto * options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01002802
2803 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2804 CHECK_VALID_SIZE(inputs.size(), 3);
2805
2806 auto& inputTensor = inputs[0];
2807 auto& splitsTensor = inputs[1];
2808 auto& axisTensor = inputs[2];
2809
2810 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
2811 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
2812 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
2813 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
2814
2815 // Inputs
2816 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2817 if (inputDimSize > MaxNumOfTensorDimensions)
2818 {
2819 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002820 fmt::format("The number of dimensions: {} for input tensors of the "
2821 "SplitV op cannot be greater than {} {}",
2822 inputTensorInfo.GetNumDimensions(),
2823 MaxNumOfTensorDimensions,
2824 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01002825 }
2826
2827 // Get split axis
2828 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
2829 std::vector<int> axisData(axisTensorInfo.GetNumElements());
2830 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2831 const unsigned int splitDim = ComputeWrappedIndex(axisData[0], inputTensorInfo.GetNumDimensions());
2832
Derek Lambertif0176992020-04-28 13:37:49 +01002833 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01002834 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01002835 unsigned int numSplits{0};
2836
2837 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01002838 {
2839 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01002840 }
2841 else
2842 {
Ryan OShea86704732020-05-26 11:41:04 +01002843 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01002844 }
2845
2846 if (numSplits == 0)
2847 {
2848 throw ParseException("SplitV has invalid number of splits");
2849 }
2850
Jan Eilersc0761e92020-06-29 16:48:44 +01002851 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01002852 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01002853 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01002854
Jan Eilersc0761e92020-06-29 16:48:44 +01002855 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01002856 int numInferred{0};
2857 unsigned int inferIdx{0};
2858 int splitSum{0};
2859 for (auto split : splitsData)
2860 {
2861 if (split < 0)
2862 {
2863 numInferred++;
2864 inferIdx = idx;
2865 }
2866 else
2867 {
2868 splitSum += split;
2869 }
2870 idx++;
2871 }
2872 // Check for inferred split sizes
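    // At most one entry of split_sizes may be -1; it is inferred as the remaining extent of the
    // split dimension, e.g. split_sizes = { 2, -1, 3 } on a dimension of size 10 infers the -1 entry as 5.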
2873 if (numInferred == 0)
2874 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002875 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01002876 {
2877 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
2878 }
2879 }
2880 else if (numInferred == 1)
2881 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002882 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01002883 }
2884 else
2885 {
2886 throw ParseException("Cannot infer split size for more than one split");
2887 }
2888
Derek Lambertif0176992020-04-28 13:37:49 +01002889 // Output size validation
2890 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2891 CHECK_VALID_SIZE(outputs.size(), numSplits);
2892
2893 // Setup Armnn descriptor
2894 SplitterDescriptor splitDesc(numSplits, inputDimSize);
2895 unsigned int accumSplit = 0;
2896 for (unsigned int j = 0; j < numSplits; ++j)
2897 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002898 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01002899
2900 // Set the size of the views.
2901 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
2902 {
2903 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
2904 if (dimIdx == splitDim)
2905 {
2906 dimSize = splitSize;
2907 }
2908 splitDesc.SetViewSize(j, dimIdx, dimSize);
2909 }
2910
2911 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
2912 accumSplit += splitSize;
2913 }
2914
James Ward58dec6b2020-09-11 17:32:44 +01002915 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01002916 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002917 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01002918
2919 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2920 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2921
2922 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2923 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002924 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01002925 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
2926 }
2927
2928 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2929 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2930}
2931
Kevin May7d96b162021-02-03 17:38:41 +00002932void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
Inki Daed4619e22020-09-10 15:33:54 +09002933{
2934 const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2935 const auto *options = operatorPtr->builtin_options.AsArgMaxOptions();
2936
2937 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2938 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2939 CHECK_VALID_SIZE(inputs.size(), 2);
2940
2941 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2942 CHECK_VALID_SIZE(outputs.size(), 1);
2943
James Ward58dec6b2020-09-11 17:32:44 +01002944 auto layerName = fmt::format("ArgMax:{}:{}", subgraphIndex, operatorIndex);
Inki Daed4619e22020-09-10 15:33:54 +09002945
2946 armnn::TensorInfo sizeTensorInfo0 = ToTensorInfo(inputs[0]);
2947 armnn::TensorInfo sizeTensorInfo1 = ToTensorInfo(inputs[1]);
2948
2949 // Get const axis value from model and set it to descriptor.
2950 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2951
2952 ArgMinMaxDescriptor desc;
2953 desc.m_Axis = axisBufferPtr->data.data()[0];
2954 // If output_type is int32 then set Signed32, otherwise default to Signed64.
2955 desc.m_Output_Type = options->output_type == tflite::TensorType_INT32 ? armnn::DataType::Signed32 : armnn::DataType::Signed64;
2956 desc.m_Function = ArgMinMaxFunction::Max;
2957
2958 // Register a ArgMax layer.
2959 IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerName.c_str());
2960
2961 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2962 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2963
2964 // Register input tensor to the layer.
2965 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2966 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2967
2968 // Register output tensor to the layer.
2969 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2970 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2971}
2972
Kevin May7d96b162021-02-03 17:38:41 +00002973void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00002974{
2975 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2976
Kevin May7d96b162021-02-03 17:38:41 +00002977 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00002978 CHECK_VALID_SIZE(inputs.size(), 2);
Kevin May7d96b162021-02-03 17:38:41 +00002979 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00002980 CHECK_VALID_SIZE(outputs.size(), 1);
2981
2982 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2983 armnn::TensorInfo indicesTensorInfo = ToTensorInfo(inputs[1]);
2984 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
2985
2986 armnn::GatherDescriptor gatherDescriptor;
2987
2988 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2989 const auto * options = operatorPtr->builtin_options.AsGatherOptions();
2990 auto axis = options->axis;
2991
2992 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
2993 auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
2994 auto outputDimensions = outputTensorInfo.GetNumDimensions();
2995 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
2996 {
2997 throw ParseException(
2998 fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
2999 axis,
3000 inputDimensions, inputDimensions,
3001 CHECK_LOCATION().AsString()));
3002 }
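    // Gather output rank is (input rank + indices rank - 1), e.g. gathering a rank 3 tensor with
    // rank 2 indices produces a rank 4 output.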
3003 if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
3004 {
3005 throw ParseException(
3006 fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
3007 outputDimensions,
3008 inputDimensions, indicesDimensions,
3009 CHECK_LOCATION().AsString()));
3010 }
3011
3012 gatherDescriptor.m_Axis = axis;
3013
3014 auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);
3015 IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
3016 ARMNN_ASSERT(layer != nullptr);
3017 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3018
3019 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3020 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
3021
3022 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3023 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3024}
3025
Kevin May7d96b162021-02-03 17:38:41 +00003026void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00003027{
3028 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3029
Kevin May7d96b162021-02-03 17:38:41 +00003030 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003031 CHECK_VALID_SIZE(inputs.size(), 1);
Kevin May7d96b162021-02-03 17:38:41 +00003032 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003033 CHECK_VALID_SIZE(outputs.size(), 1);
3034
3035 armnn::DepthToSpaceDescriptor descriptor;
3036
3037 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3038 const auto * options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
3039 auto blockSize = options->block_size;
3040 if (blockSize < 2)
3041 {
3042 throw ParseException(
3043 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
3044 blockSize,
3045 CHECK_LOCATION().AsString()));
3046 }
3047 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
3048
3049 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
3050 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
3051 ARMNN_ASSERT(layer != nullptr);
3052 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3053 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3054
3055 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3056 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3057
3058 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3059 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3060}
3061
Kevin May7d96b162021-02-03 17:38:41 +00003062void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003063{
Sadik Armagana2747482021-02-09 10:28:54 +00003064 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
3065}
3066
3067void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
3068{
3069 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
3070}
3071
3072void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
3073{
3074 ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
3075}
3076
3077void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
3078{
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003079 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3080
3081 const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3082 const auto *options = operatorPtr->builtin_options.AsReducerOptions();
3083
3084 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3085 CHECK_VALID_SIZE(inputs.size(), 2);
3086
3087 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3088 CHECK_VALID_SIZE(outputs.size(), 1);
3089
Sadik Armagana2747482021-02-09 10:28:54 +00003090 auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003091
3092 armnn::TensorInfo inputTensorInfo0 = ToTensorInfo(inputs[0]);
3093 armnn::TensorInfo inputTensorInfo1 = ToTensorInfo(inputs[1]);
3094 TensorShape input0Shape = inputTensorInfo0.GetShape();
3095
3096 ReduceDescriptor desc;
3097
3098 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3099 // Get const axis value from model and set it to descriptor.
3100 if (axisBufferPtr != nullptr)
3101 {
3102 for (uint32_t i = 0; i < inputTensorInfo1.GetNumElements(); ++i)
3103 {
3104 desc.m_vAxis.push_back(armnnUtils::GetUnsignedAxis(inputTensorInfo0.GetNumDimensions(),
3105 axisBufferPtr->data.data()[i]));
3106 }
3107 }
Sadik Armagana2747482021-02-09 10:28:54 +00003108 else
3109 {
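        // No axis tensor data was provided, so reduce over all dimensions of the input.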
3110 for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
3111 {
3112 desc.m_vAxis.push_back(i);
3113 }
3114 }
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003115
3116 desc.m_TargetHeight = input0Shape[1];
3117 desc.m_TargetWidth = input0Shape[2];
3118 desc.m_KeepDims = options->keep_dims;
Sadik Armagana2747482021-02-09 10:28:54 +00003119 desc.m_ReduceOperation = reduceOperation;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003120
3121 // Register a new layer object, Sum.
3122 IConnectableLayer *layer = m_Network->AddReduceLayer(desc, layerName.c_str());
3123
3124 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3125 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3126
3127 // Register input tensor to the layer.
3128 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3129 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3130
3131 // Register output tensor to the layer.
3132 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3133 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3134}
3135
Kevin May7d96b162021-02-03 17:38:41 +00003136armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
3137 unsigned int outputSlot,
3138 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01003139{
3140 ActivationDescriptor activationDesc;
3141 std::string layerName = prevLayer->GetName();
3142
3143 switch(activationType)
3144 {
3145 case tflite::ActivationFunctionType_NONE:
3146 {
3147 // this is a no-op: return previous layer
3148 return prevLayer;
3149 }
3150 case tflite::ActivationFunctionType_RELU:
3151 {
3152 activationDesc.m_Function = ActivationFunction::ReLu;
3153 layerName += ":RELU";
3154 break;
3155 }
3156 case tflite::ActivationFunctionType_RELU6:
3157 {
3158 activationDesc.m_Function = ActivationFunction::BoundedReLu;
3159 activationDesc.m_A = 6.0f;
3160 activationDesc.m_B = 0.0f;
3161 layerName += ":RELU6";
3162 break;
3163 }
3164 case tflite::ActivationFunctionType_TANH:
3165 {
3166 activationDesc.m_Function = ActivationFunction::TanH;
3167 activationDesc.m_A = 1.0f;
3168 activationDesc.m_B = 1.0f;
3169 layerName += ":TANH";
3170 break;
3171 }
3172
3173 // These are listed here as a reminder of other activation functions that could be supported
3174 case tflite::ActivationFunctionType_RELU_N1_TO_1:
3175 case tflite::ActivationFunctionType_SIGN_BIT:
3176 default:
3177 {
3178 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003179 fmt::format("TfLite parser doesn't support fused activation: "
3180 "{}/{} {} ",
3181 activationType,
3182 tflite::EnumNameActivationFunctionType(activationType),
3183 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003184
3185 }
3186 }
3187
3188 IConnectableLayer* activationLayer =
3189 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
3190
3191 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
3192 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
3193 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
3194 return activationLayer;
3195}
3196
Kevin May7d96b162021-02-03 17:38:41 +00003197TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char * fileName)
telsoa01c577f2c2018-08-31 09:22:23 +01003198{
3199 if (fileName == nullptr)
3200 {
James Ward58dec6b2020-09-11 17:32:44 +01003201 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003202 CHECK_LOCATION().AsString()));
3203 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01003204 std::error_code errorCode;
3205 fs::path pathToFile(fileName);
3206 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01003207 {
James Ward58dec6b2020-09-11 17:32:44 +01003208 //fmt::format() could not be used here (format error)
3209 std::stringstream msg;
3210 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
3211 << " " << CHECK_LOCATION().AsString();
3212
3213 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01003214 }
3215 std::ifstream file(fileName, std::ios::binary);
3216 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3217 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3218 fileContent.size());
3219}
3220
Kevin May7d96b162021-02-03 17:38:41 +00003221TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
telsoa01c577f2c2018-08-31 09:22:23 +01003222{
3223 if (binaryContent == nullptr)
3224 {
James Ward58dec6b2020-09-11 17:32:44 +01003225 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003226 CHECK_LOCATION().AsString()));
3227 }
3228 flatbuffers::Verifier verifier(binaryContent, len);
3229 if (verifier.VerifyBuffer<tflite::Model>() == false)
3230 {
3231 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003232 fmt::format("Buffer doesn't conform to the expected TensorFlow Lite "
3233 "flatbuffers format. size:{} {}",
3234 len,
3235 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003236 }
3237 return tflite::UnPackModel(binaryContent);
3238}
3239
TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr & model,
                                                                 size_t subgraphIndex,
                                                                 size_t operatorIndex)
{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);

    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    size_t inputCount = operatorPtr->inputs.size();
    TensorRawPtrVector result(inputCount);
    for (size_t i=0; i<inputCount; ++i)
    {
        uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
        result[i] = subgraphPtr->tensors[inputId].get();
    }
    return result;
}

TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr & model,
                                                                  size_t subgraphIndex,
                                                                  size_t operatorIndex)
{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);

    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    size_t outputCount = operatorPtr->outputs.size();
    TensorRawPtrVector result(outputCount);
    for (size_t i=0; i<outputCount; ++i)
    {
        uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
        CHECK_TENSOR(model, subgraphIndex, outputId);
        result[i] = subgraphPtr->tensors[outputId].get();
    }
    return result;
}

TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr & model,
                                                                           size_t subgraphIndex)
{
    CHECK_SUBGRAPH(model, subgraphIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];

    size_t inputCount = subgraphPtr->inputs.size();
    TensorIdRawPtrVector result(inputCount);
    for (size_t i=0; i<inputCount; ++i)
    {
        uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
        CHECK_TENSOR(model, subgraphIndex, inputId);
        result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
    }
    return result;
}

TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr & model,
                                                                            size_t subgraphIndex)
{
    CHECK_SUBGRAPH(model, subgraphIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];

    size_t outputCount = subgraphPtr->outputs.size();
    TensorIdRawPtrVector result(outputCount);
    for (size_t i=0; i<outputCount; ++i)
    {
        uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
        result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
    }
    return result;
}

std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
                                                          size_t subgraphIndex,
                                                          size_t operatorIndex)
{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->inputs;
}

std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
                                                           size_t subgraphIndex,
                                                           size_t operatorIndex)
{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->outputs;
}

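// RegisterInputSlots/RegisterOutputSlots record, per TFLite tensor index, which ArmNN layer
// slots consume or produce that tensor (via RegisterConsumerOfTensor/RegisterProducerOfTensor
// and m_SubgraphConnections); the parser uses that bookkeeping to wire the layers together
// once all operators in the subgraph have been visited.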
void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
                                          size_t operatorIndex,
                                          IConnectableLayer* layer,
                                          const std::vector<unsigned int>& tensorIndexes)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    ARMNN_ASSERT(layer != nullptr);
    if (tensorIndexes.size() != layer->GetNumInputSlots())
    {
        throw ParseException(
            fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
                        " for subgraph:{} operator index:{} {}",
                        tensorIndexes.size(),
                        layer->GetNumInputSlots(),
                        subgraphIndex,
                        operatorIndex,
                        CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
    {
        unsigned int tensorIndex = tensorIndexes[slotIndex];
        armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
        RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}

void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
                                           size_t operatorIndex,
                                           IConnectableLayer* layer,
                                           const std::vector<unsigned int>& tensorIndexes)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    ARMNN_ASSERT(layer != nullptr);
    if (tensorIndexes.size() != layer->GetNumOutputSlots())
    {
        throw ParseException(
            fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
                        " for subgraph:{} operator index:{} {}",
                        tensorIndexes.size(),
                        layer->GetNumOutputSlots(),
                        subgraphIndex,
                        operatorIndex,
                        CHECK_LOCATION().AsString()));
    }

    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
    {
        unsigned int tensorIndex = tensorIndexes[slotIndex];
        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
        RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
    }
}

void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
    for (auto const & tensorIdAndPtr : inputs)
    {
        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
        IConnectableLayer* layer =
            m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());

        auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
        layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

        RegisterOutputSlots(subgraphIndex,
                            VIRTUAL_OPERATOR_ID,
                            layer,
                            { static_cast<uint32_t>(tensorIdAndPtr.first) });
    }
}

void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
    for (auto const & tensorIdAndPtr : outputs)
    {
        auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
        IConnectableLayer* layer =
            m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());

        RegisterInputSlots(subgraphIndex,
                           VIRTUAL_OPERATOR_ID,
                           layer,
                           { static_cast<uint32_t>(tensorIdAndPtr.first) });
    }
}

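// Any tensor that has registered consumers but no producing layer (typically weight and bias
// buffers baked into the flatbuffer) is materialised here as an ArmNN Constant layer.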
void TfLiteParserImpl::SetupConstantLayers(size_t subgraphIndex)
{
    CHECK_SUBGRAPH(m_Model, subgraphIndex);

    const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
    for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
                m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
            {
                TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
                armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
                auto tensorAndData = CreateConstTensor(tensorPtr,
                                                       tensorInfo,
                                                       armnn::Optional<armnn::PermutationVector&>());

                std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
                IConnectableLayer *layer =
                    m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());

                layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
                RegisterOutputSlots(subgraphIndex,
                                    VIRTUAL_OPERATOR_ID,
                                    layer,
                                    { tensorIndex });

            }
        }
    }
}

// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
{
    CHECK_BUFFER(model, bufferIndex);
    return model->buffers[bufferIndex].get();
}

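// armnn::ConstTensor does not own the memory it points at, so the unpacked (and possibly
// permuted) buffer is kept alive in a SupportedDataStorage and handed back to the caller
// alongside the tensor.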
template<typename T>
std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
                                                TfLiteParserImpl::TensorRawPtr tensorPtr,
                                                armnn::TensorInfo& tensorInfo,
                                                armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    auto constData = CreateConstTensorImpl<T>(bufferPtr,
                                              tensorPtr,
                                              tensorInfo,
                                              permutationVector);
    TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
    return std::make_pair(constData.first, std::move(storage));
}

std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
TfLiteParserImpl::CreateConstTensor(TensorRawPtr tensorPtr,
                                    armnn::TensorInfo& tensorInfo,
                                    armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    CHECK_TENSOR_PTR(tensorPtr);
    auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
    CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);

    switch (tensorInfo.GetDataType())
    {
        case armnn::DataType::Float32:
            return CreateConstTensorAndStoreData<float>(bufferPtr,
                                                        tensorPtr,
                                                        tensorInfo,
                                                        permutationVector);
        case armnn::DataType::QAsymmU8:
            return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        case armnn::DataType::QSymmS8:
            return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
                                                         tensorPtr,
                                                         tensorInfo,
                                                         permutationVector);
        case armnn::DataType::QAsymmS8:
            return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
                                                         tensorPtr,
                                                         tensorInfo,
                                                         permutationVector);
        case armnn::DataType::Signed32:
            return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
                                                          tensorPtr,
                                                          tensorInfo,
                                                          permutationVector);
        default:
        {
            std::stringstream errString;
            errString << "Unexpected datatype when creating const tensor: "
                      << armnn::GetDataTypeName(tensorInfo.GetDataType())
                      << " shape:" << tensorInfo.GetShape()
                      << CHECK_LOCATION().AsString();
            throw ParseException(errString.str());
        }
    }
}

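// Illustrative sketch only ('parser' and "input_tensor" are placeholders; real tensor names
// come from GetSubgraphInputTensorNames()/GetSubgraphOutputTensorNames()):
//
//     BindingPointInfo inputBinding = parser.GetNetworkInputBindingInfo(0, "input_tensor");
//     armnn::TensorInfo inputInfo   = inputBinding.second; // shape and data type of that input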
BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
                                                              const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    for (auto const & input : inputs)
    {
        if (input.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
            return std::make_pair(bindingId, ToTensorInfo(input.second));
        }
    }

    std::stringstream bindings;
    for (auto const & input : inputs)
    {
        bindings << "'" << input.second->name << "' ";
    }

    throw ParseException(
        fmt::format("No input binding found for subgraph:{} and name:{}. "
                    "Possible inputs are: [{}] {}",
                    subgraphId,
                    name,
                    bindings.str(),
                    CHECK_LOCATION().AsString()));
}

BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                               const std::string& name) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        auto const output = outputs[i];
        if (output.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
            std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
                                              m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
            return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
        }
    }

    std::stringstream bindings;
    for (auto const & output : outputs)
    {
        bindings << "'" << output.second->name << "' ";
    }

    throw ParseException(
        fmt::format("No output binding found for subgraph:{} and name:{}. "
                    "Possible outputs are: [{}] {}",
                    subgraphId,
                    name,
                    bindings.str(),
                    CHECK_LOCATION().AsString()));
}

size_t TfLiteParserImpl::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}

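// Illustrative sketch only ('parser' is a hypothetical TfLiteParserImpl that has already
// loaded a model):
//
//     for (size_t i = 0; i < parser.GetSubgraphCount(); ++i)
//     {
//         for (const std::string& inputName  : parser.GetSubgraphInputTensorNames(i))  { /*...*/ }
//         for (const std::string& outputName : parser.GetSubgraphOutputTensorNames(i)) { /*...*/ }
//     }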
std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(inputs.size());
    for (auto const & input : inputs)
    {
        result.push_back(input.second->name);
    }
    return result;
}

std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(outputs.size());
    for (auto const & output : outputs)
    {
        result.push_back(output.second->name);
    }
    return result;
}

const std::string TfLiteParserImpl::GetVersion()
{
    return TFLITE_PARSER_VERSION;
}

TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}

TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}

TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}

TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}

} // armnnTfLiteParser