blob: 2df47eb19869d24a344b791556e7b392ddeaf844 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kellyc5789ca2020-07-06 19:24:15 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Matthew Sloyanac001ee2021-02-03 10:43:04 +00008#include "armnnTfLiteParser/Version.hpp"
9
Sadik Armagand109a4d2020-07-28 10:42:13 +010010#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +000011#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010012#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000013#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010014#include <armnn/Tensor.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000015#include <armnnUtils/TensorUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010016#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010017#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000018#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010019#include <armnn/utility/NumericCast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010020
21// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000022#include <armnnUtils/Permute.hpp>
Francis Murtagh532a29d2020-06-29 11:50:01 +010023#include <Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000024
Sadik Armagan479045b2018-10-01 11:51:37 +010025#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010026#include <VerificationHelpers.hpp>
27
28// The generated code based on the Tf Lite schema:
29#include <schema_generated.h>
30
Matteo Martincighe011d202019-11-28 11:35:47 +000031#include <flatbuffers/flexbuffers.h>
32
James Ward58dec6b2020-09-11 17:32:44 +010033#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010034
telsoa01c577f2c2018-08-31 09:22:23 +010035#include <algorithm>
Matthew Sloyanac001ee2021-02-03 10:43:04 +000036#include <fstream>
37#include <iostream>
telsoa01c577f2c2018-08-31 09:22:23 +010038#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010039#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000040#include <sstream>
41
// Throws armnn::ParseException with the streamed message, appending the call
// site (from CHECK_LOCATION()) for easier debugging. The static_cast to
// const std::stringstream& is required to call .str() on the temporary stream.
#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
    { \
        throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
                                                                            << ": " \
                                                                            << CHECK_LOCATION().AsString()).str()); \
    }
telsoa01c577f2c2018-08-31 09:22:23 +010048
49using namespace armnn;
50using armnn::CheckLocation;
51namespace armnnTfLiteParser
52{
Kevin May7d96b162021-02-03 17:38:41 +000053
// ITfLiteParser is a thin pimpl facade: every public call below is forwarded
// verbatim to the TfLiteParserImpl instance owned by pTfLiteParserImpl.
ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
    pTfLiteParserImpl(new TfLiteParserImpl(options)) {}

ITfLiteParser::~ITfLiteParser() = default;

// Creates a raw (caller-owned) parser instance; pair with ITfLiteParser::Destroy.
ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
{
    return new ITfLiteParser(options);
}

// Creates a parser wrapped in a smart pointer whose deleter calls Destroy.
ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
{
    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
}

void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}

// Loads a TfLite flatbuffer file from disk and builds an armnn::INetwork.
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
{
    return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
}

// Builds an armnn::INetwork from an in-memory TfLite flatbuffer.
armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
}

BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
                                                           const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
}

BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
                                                            const std::string& name) const
{
    return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
}

size_t ITfLiteParser::GetSubgraphCount() const
{
    return pTfLiteParserImpl->GetSubgraphCount();
}

std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
}

std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
{
    return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
}
110
namespace
{

// Sentinel operator index used when a check is made outside the context of a
// concrete operator (CheckModel treats this value as "no operator to check").
const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
115
Kevin May7d96b162021-02-03 17:38:41 +0000116void CheckSubgraph(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100117 size_t subgraphIndex,
118 const CheckLocation & location)
119{
120 if (model.get() == nullptr)
121 {
122 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100123 fmt::format("{} was called with invalid (null) model. "
124 "Possible reason is that the model is not yet loaded and Unpack(ed). "
125 "subgraph:{} at {}",
126 location.m_Function,
127 subgraphIndex,
128 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100129 }
130 else if (subgraphIndex >= model->subgraphs.size())
131 {
132 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100133 fmt::format("{} was called with an invalid subgraph index. "
134 "subgraph:{} at {}",
135 location.m_Function,
136 subgraphIndex,
137 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100138 }
139}
140
141#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
142 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
143
Kevin May7d96b162021-02-03 17:38:41 +0000144void CheckModel(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100145 size_t subgraphIndex,
146 size_t operatorIndex,
147 const CheckLocation & location)
148{
149 if (model.get() == nullptr)
150 {
151 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100152 fmt::format("{} was called with invalid (null) model. "
153 "Possible reason is that the model is not yet loaded and Unpack(ed). "
154 "subgraph:{} operator:{} at {}",
155 location.m_Function,
156 subgraphIndex,
157 operatorIndex,
158 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100159 }
160 else if (subgraphIndex >= model->subgraphs.size())
161 {
162 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100163 fmt::format("{} was called with an invalid subgraph index. "
164 "subgraph:{} operator:{} at {}",
165 location.m_Function,
166 subgraphIndex,
167 operatorIndex,
168 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100169 }
170 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
171 operatorIndex != VIRTUAL_OPERATOR_ID)
172 {
173 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100174 fmt::format("{} was called with an invalid operator index. "
175 "subgraph:{} operator:{} at {}",
176 location.m_Function,
177 subgraphIndex,
178 operatorIndex,
179 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100180 }
181}
182
183#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
184 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
185
Kevin May7d96b162021-02-03 17:38:41 +0000186void CheckTensor(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100187 size_t subgraphIndex,
188 size_t tensorIndex,
189 const CheckLocation & location)
190{
191 // not checking model, because I assume CHECK_MODEL already run
192 // and checked that. An assert would do.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100193 ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
telsoa01c577f2c2018-08-31 09:22:23 +0100194
195 // also subgraph index should be checked by CHECK_MODEL so
196 // I only add an assert here
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100197 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
telsoa01c577f2c2018-08-31 09:22:23 +0100198
199 // the tensor index is the only one to check here
200 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
201 {
202 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100203 fmt::format("{} was called with an invalid tensor index. "
204 "subgraph:{} tensor:{} at {}",
205 location.m_Function,
206 subgraphIndex,
207 tensorIndex,
208 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100209 }
210}
211
212#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
213 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
214
Kevin May7d96b162021-02-03 17:38:41 +0000215void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100216 const CheckLocation & location)
217{
218 if (rawPtr == nullptr)
219 {
220 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100221 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100222 }
223}
224
225#define CHECK_TENSOR_PTR(TENSOR_PTR) \
226 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
227
Kevin May7d96b162021-02-03 17:38:41 +0000228void CheckBuffer(const TfLiteParserImpl::ModelPtr & model,
telsoa01c577f2c2018-08-31 09:22:23 +0100229 size_t bufferIndex,
230 const CheckLocation & location)
231{
232 if (model.get() == nullptr)
233 {
234 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100235 fmt::format("{} was called with invalid (null) model. "
236 "Possible reason is that the model is not yet loaded and Unpack(ed). "
237 "buffer:{} at {}",
238 location.m_Function,
239 bufferIndex,
240 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100241 }
242 else if (bufferIndex >= model->buffers.size())
243 {
244 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100245 fmt::format("{} was called with an invalid buffer index. "
246 "buffer index:{} at {}",
247 location.m_Function,
248 bufferIndex,
249 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100250 }
251 else if (model->buffers[bufferIndex].get() == nullptr)
252 {
253 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100254 fmt::format("The buffer #{} is null. {}",
255 bufferIndex,
256 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100257 }
258}
259
260#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
261 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
262
Kevin May7d96b162021-02-03 17:38:41 +0000263void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100264 const armnn::TensorInfo & tensorInfo,
265 uint32_t bufferId,
266 const CheckLocation & location)
267{
268 if (bufferPtr == nullptr)
269 {
270 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100271 fmt::format("BufferPtr is null for buffer:{}. {}",
272 bufferId,
273 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100274 }
275 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
276 tensorInfo.GetNumBytes() > bufferPtr->data.size())
277 {
278 std::stringstream ss;
279 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
280 << "For tensor: " << tensorInfo.GetShape()
281 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
282 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
283 throw ParseException(ss.str());
284 }
285}
286
287#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
288 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
289
290bool IsActivationSupported(tflite::ActivationFunctionType activationType)
291{
292 switch(activationType)
293 {
294 case tflite::ActivationFunctionType_NONE:
295 case tflite::ActivationFunctionType_RELU:
296 case tflite::ActivationFunctionType_RELU6:
297 case tflite::ActivationFunctionType_TANH:
298 {
299 return true;
300 }
301 default:
302 {
303 return false;
304 }
305 }
306}
307
308#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
309 do { \
310 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
311 { \
312 throw ParseException( \
James Ward58dec6b2020-09-11 17:32:44 +0100313 fmt::format("TfLite parser doesn't suppport fused activation: " \
314 "{}/{} in {} subgraph:{} operator:{} at {}", \
315 OPTION->fused_activation_function, \
316 tflite::EnumNameActivationFunctionType(\
317 OPTION->fused_activation_function), \
318 __func__, \
319 SUBGRAPH_INDEX, \
320 OPERATOR_INDEX, \
321 CHECK_LOCATION().FileLine())); \
telsoa01c577f2c2018-08-31 09:22:23 +0100322 } \
323 } while(false)
324
325
326std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
327{
328 std::vector<unsigned int> result;
329 result.reserve(in.size());
330 for (auto & i : in)
331 {
mathad01c21025d2021-04-26 10:09:37 +0100332 // If the location of the input data is -1 then the input should be ignored.
333 if (i == -1)
334 {
335 continue;
336 }
telsoa01c577f2c2018-08-31 09:22:23 +0100337 result.push_back(CHECKED_NON_NEGATIVE(i));
338 }
339 return result;
340}
341
342void CalcPadding(uint32_t inputSize,
343 uint32_t filterSize,
344 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100345 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100346 uint32_t& paddingFront,
347 uint32_t& paddingBack,
348 tflite::Padding padding)
349{
350 paddingFront = 0;
351 paddingBack = 0;
352 if (padding == tflite::Padding_SAME)
353 {
354 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100355 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
356 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100357 if (temp > inputSize)
358 {
359 paddingFront = (temp - inputSize) / 2;
360 paddingBack = (temp - inputSize) - paddingFront;
361 }
362 }
363}
364
Kevin May7d96b162021-02-03 17:38:41 +0000365armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100366 const std::vector<unsigned int>& shapes,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100367 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100368{
369 armnn::DataType type;
370 CHECK_TENSOR_PTR(tensorPtr);
371
372 switch (tensorPtr->type)
373 {
374 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000375 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100376 break;
377 case tflite::TensorType_FLOAT32:
378 type = armnn::DataType::Float32;
379 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000380 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000381 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000382 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000383 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000384 type = armnn::DataType::QAsymmS8;
385 }
386 else
387 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000388 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000389 type = armnn::DataType::QSymmS8;
390 }
Finn Williamsed66d142019-12-06 09:55:55 +0000391 break;
392 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000393 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000394 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100395 case tflite::TensorType_INT32:
396 type = armnn::DataType::Signed32;
397 break;
Inki Daed4619e22020-09-10 15:33:54 +0900398 case tflite::TensorType_INT64:
399 type = armnn::DataType::Signed64;
400 break;
Matthew Sloyaned7fce42021-04-15 20:46:24 +0100401 case tflite::TensorType_BOOL:
402 type = armnn::DataType::Boolean;
403 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100404 default:
405 {
406 CheckLocation location = CHECK_LOCATION();
407 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100408 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
409 tensorPtr->type,
410 tflite::EnumNameTensorType(tensorPtr->type),
411 tensorPtr->name,
412 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100413 }
414 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100415 std::vector<unsigned int> safeShape = shapes;
Sadik Armagand109a4d2020-07-28 10:42:13 +0100416 bool isDynamic = false;
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100417 if (safeShape.size() == 0)
418 {
419 safeShape.push_back(1);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100420 if (outputTensor)
421 {
422 isDynamic = true;
423 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100424 }
425
Keith Davisd305e1a2020-01-22 11:57:54 +0000426 float quantizationScale = 0.0f;
427 int32_t quantizationOffset = 0;
428
429 if (tensorPtr->quantization.get())
430 {
431 if (tensorPtr->quantization->scale.size() <= 1)
432 {
433 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
434 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
435
436 if (tensorPtr->quantization->scale.size() == 1)
437 {
438 quantizationScale = tensorPtr->quantization->scale[0];
439 }
440 if (tensorPtr->quantization->zero_point.size() == 1)
441 {
442 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000443 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100444 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000445 }
446
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100447 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100448 safeShape.data());
449 if (isDynamic)
450 {
451 tensorShape = TensorShape(1, false);
452 }
453 armnn::TensorInfo result(tensorShape,
454 type,
455 quantizationScale,
456 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000457 return result;
458 }
459 else
460 {
461 std::vector<float> quantizationScales;
462 std::vector<int32_t> quantizationOffsets;
463
464 // Scale
465 std::copy(tensorPtr->quantization->scale.begin(),
466 tensorPtr->quantization->scale.end(),
467 std::back_inserter(quantizationScales));
468
Keith Davis0c2eeac2020-02-11 16:51:50 +0000469 // QSymmS8 Per-axis
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100470 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100471 safeShape.data());
472 if (isDynamic)
473 {
474 tensorShape = TensorShape(1, false);
475 }
476 armnn::TensorInfo result(tensorShape,
477 type,
478 quantizationScales,
Jan Eilers7612bd62021-04-06 17:29:03 +0100479 armnn::numeric_cast<unsigned int>(tensorPtr->quantization->quantized_dimension));
Keith Davisd305e1a2020-01-22 11:57:54 +0000480 return result;
481 }
482 }
483 else
484 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100485 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100486 safeShape.data());
487 if (isDynamic)
488 {
489 tensorShape = TensorShape(1, false);
490 }
491 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000492 type,
493 quantizationScale,
494 quantizationOffset);
495 return result;
496 }
telsoa01c577f2c2018-08-31 09:22:23 +0100497}
498
Jan Eilers7612bd62021-04-06 17:29:03 +0100499armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr)
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000500{
501 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100502 return ToTensorInfo(tensorPtr, dimensions);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000503}
504
Kevin May7d96b162021-02-03 17:38:41 +0000505armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
Sadik Armagand109a4d2020-07-28 10:42:13 +0100506 const bool outputTensor)
507{
508 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Jan Eilers7612bd62021-04-06 17:29:03 +0100509 return ToTensorInfo(tensorPtr, dimensions, outputTensor);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100510}
511
// Copies the weight data of a constant tensor out of the flatbuffer into a
// freshly allocated buffer, optionally permuting it to a new layout.
// Returns the ConstTensor together with the unique_ptr that owns its storage;
// the caller must keep both alive together since the ConstTensor only points
// at the data.
// Note: tensorInfo is an in/out parameter — it is overwritten with the
// permuted shape when a non-empty permutation vector is supplied.
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
                      TfLiteParserImpl::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    // tensorPtr is only read in the assert messages below; suppress the
    // unused-parameter warning in release builds.
    IgnoreUnused(tensorPtr);
    ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    ARMNN_ASSERT_MSG(bufferPtr != nullptr,
        fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        // Permute the shape first, then rearrange the raw data to match it.
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        // No permutation requested: straight byte copy.
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
539
telsoa01c577f2c2018-08-31 09:22:23 +0100540armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
541{
542 // generate the binding id by shifting the tensor id by 8 bit
543 // and add the subgraph id, which allows 256 subgraphs
544 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
545}
546
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000547bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
548{
549 const unsigned int actualSize = actual.GetNumDimensions();
550 if (actualSize != expected.size())
551 {
552 return false;
553 }
554
555 for (unsigned int i = 0u; i < actualSize; i++)
556 {
557 if (expected[i] < 0 ||
558 actual[i] != static_cast<unsigned int>(expected[i]))
559 {
560 return false;
561 }
562 }
563
564 return true;
565}
566
James Conroy05102392020-06-24 15:39:55 +0100567void CheckMatchingQuantization(const TensorInfo& first,
568 const TensorInfo& second,
569 const std::string& descName,
570 std::string const& firstName,
571 std::string const& secondName)
572{
573 if (!first.IsQuantized() ||
574 !second.IsQuantized())
575 {
576 // Not a quantized type, ignore the validation
577 return;
578 }
579
580 DataType firstDataType = first.GetDataType();
581 DataType secondDataType = second.GetDataType();
582
583 if (firstDataType != secondDataType)
584 {
585 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
586 " must be of the same quantized type, " +
587 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
588 secondName + " is " + GetDataTypeName(secondDataType));
589 }
590
591 if (!first.IsTypeSpaceMatch(second))
592 {
593 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
594 " must have the same quantization space, " +
595 firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
596 " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
597 secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
598 " and scale " + std::to_string(second.GetQuantizationScale()));
599 }
600}
601
telsoa01c577f2c2018-08-31 09:22:23 +0100602} // <anonymous>
603
// Constructs the parser and registers one parse function per supported TfLite
// builtin operator. Every slot in m_ParserFunctions is pre-filled with
// ParseUnsupportedOperator so an unregistered opcode yields a clear parse
// error instead of an out-of-range dispatch.
TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ABS]                     = &TfLiteParserImpl::ParseAbs;
    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParserImpl::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN]                 = &TfLiteParserImpl::ParseArgMin;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX]                 = &TfLiteParserImpl::ParseArgMax;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParserImpl::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParserImpl::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CAST]                    = &TfLiteParserImpl::ParseCast;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParserImpl::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParserImpl::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParserImpl::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE]          = &TfLiteParserImpl::ParseDepthToSpace;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParserImpl::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParserImpl::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParserImpl::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ELU]                     = &TfLiteParserImpl::ParseElu;
    m_ParserFunctions[tflite::BuiltinOperator_EXP]                     = &TfLiteParserImpl::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS]             = &TfLiteParserImpl::ParseExpandDims;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParserImpl::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER]                  = &TfLiteParserImpl::ParseGather;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH]              = &TfLiteParserImpl::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParserImpl::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT]             = &TfLiteParserImpl::ParseLogicalNot;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParserImpl::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParserImpl::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParserImpl::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParserImpl::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParserImpl::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParserImpl::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParserImpl::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParserImpl::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParserImpl::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_PRELU]                   = &TfLiteParserImpl::ParsePrelu;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParserImpl::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParserImpl::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParserImpl::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX]              = &TfLiteParserImpl::ParseReduceMax;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN]              = &TfLiteParserImpl::ParseReduceMin;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParserImpl::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParserImpl::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_RSQRT]                   = &TfLiteParserImpl::ParseRsqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SHAPE]                   = &TfLiteParserImpl::ParseShape;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParserImpl::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParserImpl::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParserImpl::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParserImpl::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V]                 = &TfLiteParserImpl::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParserImpl::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParserImpl::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParserImpl::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_SUM]                     = &TfLiteParserImpl::ParseSum;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParserImpl::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParserImpl::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParserImpl::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParserImpl::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"]            = &TfLiteParserImpl::ParseDetectionPostProcess;
}
670
Kevin May7d96b162021-02-03 17:38:41 +0000671void TfLiteParserImpl::ResetParser()
telsoa01c577f2c2018-08-31 09:22:23 +0100672{
673 m_Network = armnn::INetworkPtr(nullptr, nullptr);
674 m_Model = nullptr;
675 m_SubgraphConnections.clear();
676}
677
// Loads a TfLite flatbuffer model from disk, then converts it into an
// armnn::INetwork. Any state from a previous parse is discarded first.
INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

// Converts an in-memory TfLite flatbuffer into an armnn::INetwork. Any state
// from a previous parse is discarded first.
INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
691
Kevin May7d96b162021-02-03 17:38:41 +0000692INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
telsoa01c577f2c2018-08-31 09:22:23 +0100693{
Sadik Armagand109a4d2020-07-28 10:42:13 +0100694
695 using NetworkOptions = std::vector<BackendOptions>;
696 NetworkOptions networkOptions = {};
697 if (m_Options && m_Options.value().m_InferAndValidate)
698 {
699 BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
700 {
701 { "InferAndValidate", true }
702 });
703
704 networkOptions.push_back(shapeInferenceMethodOption);
705 }
706
707 m_Network = INetwork::Create(networkOptions);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100708 ARMNN_ASSERT(m_Model.get() != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +0100709
telsoa01c577f2c2018-08-31 09:22:23 +0100710 if (m_Model->subgraphs.size() != 1)
711 {
712 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100713 fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
714 m_Model->subgraphs.size(),
715 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100716 }
717
718 size_t subgraphIndex = 0;
Colm Donelan6350d272020-06-09 16:56:25 +0100719 size_t operatorIndex = 0;
720 try
telsoa01c577f2c2018-08-31 09:22:23 +0100721 {
Colm Donelan6350d272020-06-09 16:56:25 +0100722 for (SubgraphPtr const& subgraph : m_Model->subgraphs)
telsoa01c577f2c2018-08-31 09:22:23 +0100723 {
Colm Donelan6350d272020-06-09 16:56:25 +0100724 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
725 for (OperatorPtr const& op : subgraph->operators)
telsoa01c577f2c2018-08-31 09:22:23 +0100726 {
Colm Donelan6350d272020-06-09 16:56:25 +0100727 auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];
telsoa01c577f2c2018-08-31 09:22:23 +0100728 auto builtinCode = opCodePtr->builtin_code;
729
730 if (builtinCode > tflite::BuiltinOperator_MAX)
731 {
James Ward58dec6b2020-09-11 17:32:44 +0100732 throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
733 "subgraph:{} operator idx:{}. {}",
734 builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
735 operatorIndex, CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100736 }
737
738 // lookup and call the parser function
Colm Donelan6350d272020-06-09 16:56:25 +0100739 auto& parserFunction = m_ParserFunctions[builtinCode];
telsoa01c577f2c2018-08-31 09:22:23 +0100740 (this->*parserFunction)(subgraphIndex, operatorIndex);
Colm Donelan6350d272020-06-09 16:56:25 +0100741 ++operatorIndex;
telsoa01c577f2c2018-08-31 09:22:23 +0100742 }
telsoa01c577f2c2018-08-31 09:22:23 +0100743
Colm Donelan6350d272020-06-09 16:56:25 +0100744 SetupInputLayers(subgraphIndex);
745 SetupOutputLayers(subgraphIndex);
746 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100747
Colm Donelan6350d272020-06-09 16:56:25 +0100748 ++subgraphIndex;
749 operatorIndex = 0;
telsoa01c577f2c2018-08-31 09:22:23 +0100750 }
telsoa01c577f2c2018-08-31 09:22:23 +0100751 }
Colm Donelan6350d272020-06-09 16:56:25 +0100752 catch (const ParseException& e)
telsoa01c577f2c2018-08-31 09:22:23 +0100753 {
Colm Donelan6350d272020-06-09 16:56:25 +0100754 std::stringstream errorString;
755 errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
756 << subgraphIndex << " error: " << e.what();
757 ARMNN_LOG(error) << errorString.str();
758 std::stringstream errors;
759 errors << errorString.str() << "\n";
telsoa01c577f2c2018-08-31 09:22:23 +0100760 throw ParseException(errors.str());
761 }
762
763 // establish the connections from the layer outputs to the inputs of the subsequent layers
Colm Donelan6350d272020-06-09 16:56:25 +0100764 for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +0100765 {
766 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
767 {
768 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
769 {
770 for (size_t inputSlotIdx = 0;
771 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
772 ++inputSlotIdx)
773 {
774 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
775 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
776 }
777 }
778 }
779 }
780
781 return std::move(m_Network);
782}
783
Kevin May7d96b162021-02-03 17:38:41 +0000784void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
785 size_t tensorIndex,
786 armnn::IOutputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +0100787{
788 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100789 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
790 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100791
792 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
793
794 // assuming there is only one producer for that tensor
795 if (tensorSlots.outputSlot != nullptr)
796 {
James Ward58dec6b2020-09-11 17:32:44 +0100797 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
798 "subgraph:{} tensor:{} {}",
799 subgraphIndex,
800 tensorIndex,
801 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100802 }
803
804 tensorSlots.outputSlot = slot;
805}
806
Kevin May7d96b162021-02-03 17:38:41 +0000807void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
808 size_t tensorIndex,
809 armnn::IInputSlot* slot)
telsoa01c577f2c2018-08-31 09:22:23 +0100810{
811 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100812 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
813 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100814
Finn Williamsd4fa5452021-03-01 12:31:41 +0000815 TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +0100816 tensorSlots.inputSlots.push_back(slot);
817}
818
Kevin May7d96b162021-02-03 17:38:41 +0000819void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100820{
821 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
822
823 // NOTE: By default we presume the custom operator is not supported
Kevin May7d96b162021-02-03 17:38:41 +0000824 auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100825
826 // Identify custom code defined for custom operator
827 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
828 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
829
830 // Find parser function that correspondes to custom code (if any)
831 auto iterator = m_CustomParserFunctions.find(customCode);
832 if (iterator != m_CustomParserFunctions.end())
833 {
834 customParserFunction = iterator->second;
835 }
836
837 // Run parser function
838 (this->*customParserFunction)(subgraphIndex, operatorIndex);
839}
840
Kevin May7d96b162021-02-03 17:38:41 +0000841void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +0100842{
843 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100844
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100845 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
846
847 auto opcodeIndex = operatorPtr->opcode_index;
848 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
849
850 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
851 {
852 // Do not add StandInLayer, throw ParseException instead
853 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100854 fmt::format("Operator not supported. "
855 "subgraph:{} operator:{} "
856 "opcode_index:{} opcode:{} / {} {}",
857 subgraphIndex,
858 operatorIndex,
859 opcodeIndex,
860 opcode,
861 tflite::EnumNameBuiltinOperator(opcode),
862 CHECK_LOCATION().AsString()));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100863 }
864
865 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
866 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
867
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100868 const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
869 const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100870
871 StandInDescriptor descriptor(numInputs, numOutputs);
James Ward58dec6b2020-09-11 17:32:44 +0100872 auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100873
874 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
875 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +0100876 ARMNN_ASSERT(layer != nullptr);
877
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100878 for (unsigned int i = 0u; i < numOutputs; ++i)
879 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100880 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100881 }
882
883 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
884 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
885
886 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
887 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +0100888}
889
mathad01b392e982021-04-07 12:07:30 +0100890void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
891{
892 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
893
894 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
895 CHECK_VALID_SIZE(inputs.size(), 1);
896 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
897 CHECK_VALID_SIZE(outputs.size(), 1);
898
899 auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
900
901 IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
902 ARMNN_ASSERT(layer != nullptr);
903
904 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
905 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
906
907 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
908 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
909
910 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
911 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
912}
913
// Parses a TfLite CONV_2D operator into an ArmNN Convolution2dLayer.
// Inputs: [0] activation tensor (NHWC), [1] constant filter, [2] optional bias.
// Padding is computed from the tflite 'padding' enum plus stride/dilation.
// A fused activation (if any) is appended as a separate ArmNN layer.
void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);  // 3 inputs when a bias is present

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Derive explicit pad values from the tflite SAME/VALID padding mode.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData,
                                                 Optional<ConstTensor>(biasTensorAndData),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // NOTE: 'layer' is rebound here - the output slots must be registered on
    // the fused-activation layer (or the conv layer when no activation).
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
992
// Parses a TfLite DEPTHWISE_CONV_2D operator into an ArmNN
// DepthwiseConvolution2dLayer.
// Inputs: [0] activation tensor (NHWC), [1] constant filter in tflite's
// [1, H, W, I * M] layout, [2] optional bias. A fused activation (if any)
// is appended as a separate ArmNN layer.
void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // depth_multiplier is only validated here, not stored in the descriptor.
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);  // 3 inputs when a bias is present
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Derive explicit pad values from the tflite SAME/VALID padding mode.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // ArmNN uses the same filter tensor layout at TfLite [1, H, W, O] no need for any permutation
    auto filterTensor = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensor,
                                                          Optional<ConstTensor>(biasTensorAndData),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensor,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // NOTE: 'layer' is rebound here - the output slots must be registered on
    // the fused-activation layer (or the depthwise layer when no activation).
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1069
Kevin May7d96b162021-02-03 17:38:41 +00001070void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsed66d142019-12-06 09:55:55 +00001071{
1072 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1073
1074 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1075 CHECK_VALID_SIZE(inputs.size(), 1);
1076
1077 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1078 CHECK_VALID_SIZE(outputs.size(), 1);
1079
James Ward58dec6b2020-09-11 17:32:44 +01001080 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +00001081
1082 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001083 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +00001084
Sadik Armagand109a4d2020-07-28 10:42:13 +01001085 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Finn Williamsed66d142019-12-06 09:55:55 +00001086 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1087
1088 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1089 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1090
1091 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1092 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1093}
1094
Teresa Charlin3ab85482021-06-08 16:59:29 +01001095void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorIndex)
1096{
1097 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1098
1099 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1100 CHECK_VALID_SIZE(inputs.size(), 2);
1101
1102 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1103 CHECK_VALID_SIZE(outputs.size(), 1);
1104
1105 auto layerName = fmt::format("ExpandDims:{}:{}", subgraphIndex, operatorIndex);
1106
1107 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1108 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1109
1110 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1111
1112 ReshapeDescriptor reshapeDesc;
1113 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1114
1115 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1116 ARMNN_ASSERT(layer != nullptr);
1117 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1118
1119 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1120 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1121
1122 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1123 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1124}
1125
Kevin May7d96b162021-02-03 17:38:41 +00001126void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
Keith Davis4cd29a02019-09-09 14:49:20 +01001127{
1128 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1129
1130 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001131 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001132
1133 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1134 CHECK_VALID_SIZE(outputs.size(), 1);
1135
James Ward58dec6b2020-09-11 17:32:44 +01001136 auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly08759e22020-03-02 11:41:31 +00001137 TransposeDescriptor desc;
Keith Davis4cd29a02019-09-09 14:49:20 +01001138
josh minorba424d22019-11-13 10:55:17 -06001139 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001140 {
1141 armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
1142 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001143 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1144 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001145 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
Mike Kelly08759e22020-03-02 11:41:31 +00001146 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
Kevin May85d92602019-09-27 17:21:06 +01001147
Mike Kelly08759e22020-03-02 11:41:31 +00001148 desc = TransposeDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001149 }
1150
James Conroy05102392020-06-24 15:39:55 +01001151 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001152 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001153 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Keith Davis4cd29a02019-09-09 14:49:20 +01001154
James Conroy05102392020-06-24 15:39:55 +01001155 IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001156 ARMNN_ASSERT(layer != nullptr);
Keith Davis4cd29a02019-09-09 14:49:20 +01001157 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1158
1159 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1160 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1161
1162 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1163 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1164}
1165
// Parses a TfLite TRANSPOSE_CONV operator into an ArmNN
// TransposeConvolution2dLayer.
// Input layout: [0] constant output-shape tensor, [1] constant filter,
// [2] the actual data tensor, [3] optional bias (4-input form).
// Note the unusual ordering: the data input is index 2, so only
// inputTensorIndexes[2] is registered as a real input slot.
void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    // A fourth input is the bias tensor.
    if (inputs.size() == 4)
    {
        desc.m_BiasEnabled = true;
    }
    else
    {
        CHECK_VALID_SIZE(inputs.size(), 3);
    }

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // If present, decode the explicit output shape from the constant
    // inputs[0] tensor; handling depends on that tensor's data type.
    // NOTE(review): if the dtype is neither Signed32 nor QAsymmU8 the shape
    // vector stays value-initialized (all zeros) but is still applied.
    if (inputs[0])
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
        std::vector<int> output_shape(tensorInfo.GetNumElements());
        if (tensorInfo.GetDataType() == DataType::Signed32)
        {
            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
        }
        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
        {
            // Byte-wise copy: each shape element is stored as a single byte.
            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
            {
                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
            }
        }
        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : output_shape)
        {
            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        desc.m_OutputShapeEnabled = true;
    }
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);

    if (desc.m_BiasEnabled)
    {
        auto biasTensorInfo = ToTensorInfo(inputs[3]);
        auto biasConstTensor = CreateConstTensorNonPermuted(inputs[3], biasTensorInfo);
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData,
                                                          biasConstTensor,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1274
// Parses a TfLite AVERAGE_POOL_2D operator by delegating to the shared
// pooling handler with the Average algorithm selected.
void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1279
Kevin May7d96b162021-02-03 17:38:41 +00001280void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001281{
1282 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1283
1284 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1285 CHECK_VALID_SIZE(inputs.size(), 3);
1286
1287 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1288 CHECK_VALID_SIZE(outputs.size(), 1);
1289
1290 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1291 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1292
1293 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1294 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1295
1296 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1297 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1298
1299 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1300 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1301
1302 size_t step = 2;
1303 std::vector<std::pair<unsigned int, unsigned int>> crops;
1304 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1305 {
1306 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1307 }
1308
1309 armnn::BatchToSpaceNdDescriptor desc;
1310 desc.m_BlockShape = blockShape;
1311 desc.m_Crops = crops;
1312 desc.m_DataLayout = armnn::DataLayout::NHWC;
1313
James Ward58dec6b2020-09-11 17:32:44 +01001314 auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001315
James Conroy05102392020-06-24 15:39:55 +01001316 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001317 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001318 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1319
1320 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1321 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001322 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1323
1324 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1325 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1326
1327 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1328 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1329}
1330
Kevin May7d96b162021-02-03 17:38:41 +00001331void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
Matthew Jackson28c94572019-07-18 10:47:03 +01001332{
1333 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1334
1335 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1336 CHECK_VALID_SIZE(inputs.size(), 1);
1337
1338 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1339 CHECK_VALID_SIZE(outputs.size(), 1);
1340
1341 L2NormalizationDescriptor desc;
1342 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001343 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001344 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1345
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001346 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001347
Sadik Armagand109a4d2020-07-28 10:42:13 +01001348 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson28c94572019-07-18 10:47:03 +01001349 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1350
1351 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1352 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1353
1354 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1355 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1356}
1357
// MAX_POOL_2D: thin wrapper that forwards to the shared Pool2D handler,
// selecting the Max pooling algorithm.
void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1362
Kevin May7d96b162021-02-03 17:38:41 +00001363void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001364{
1365 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1366
1367 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1368 CHECK_VALID_SIZE(inputs.size(), 2);
1369
1370 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1371 CHECK_VALID_SIZE(outputs.size(), 1);
1372
James Ward58dec6b2020-09-11 17:32:44 +01001373 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001374
1375 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1376 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1377 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001378
Sadik Armagand109a4d2020-07-28 10:42:13 +01001379 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001380 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1381
1382 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1383 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001384 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1385
1386 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001387 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001388
1389 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1390 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1391}
1392
Kevin May7d96b162021-02-03 17:38:41 +00001393void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001394{
1395 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1396
1397 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1398 CHECK_VALID_SIZE(inputs.size(), 2);
1399
1400 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1401 CHECK_VALID_SIZE(outputs.size(), 1);
1402
James Ward58dec6b2020-09-11 17:32:44 +01001403 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001404
1405 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1406 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1407 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001408
Sadik Armagand109a4d2020-07-28 10:42:13 +01001409 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001410 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1411
1412 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1413 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001414 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1415
1416 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001417 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001418
1419 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1420 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1421}
1422
Kevin May7d96b162021-02-03 17:38:41 +00001423void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
1424 size_t operatorIndex,
1425 PoolingAlgorithm algorithm)
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001426{
1427 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1428
1429 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1430 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1431
1432 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1433
1434 std::string layerName;
1435
1436 switch (algorithm)
1437 {
1438 case PoolingAlgorithm::Average:
1439 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001440 fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001441 break;
1442 case PoolingAlgorithm::Max:
1443 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001444 fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001445 break;
1446 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001447 ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001448 }
1449
1450 Pooling2dDescriptor desc;
1451
1452 desc.m_PoolType = algorithm;
1453 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1454 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1455 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1456 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1457 desc.m_PaddingMethod = PaddingMethod::Exclude;
1458 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001459 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001460
1461 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1462 CHECK_VALID_SIZE(inputs.size(), 1);
1463 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1464
1465 // assuming input is NHWC
1466 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1467 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1468
Pablo Tellof0bd6832019-04-26 17:58:13 +01001469 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1470 desc.m_PadTop, desc.m_PadBottom, options->padding);
1471 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1472 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001473
1474 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1475 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001476
Sadik Armagand109a4d2020-07-28 10:42:13 +01001477 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001478 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1479
1480 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1481 ARMNN_ASSERT(layer != nullptr);
jimfly01c25411c2018-11-14 17:47:22 +00001482 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001483
1484 // register the input connection slots for the layer, connections are made after all layers have been created
1485 // only the tensors for the inputs are relevant, exclude the const tensors
1486 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001487 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001488
jimfly01c25411c2018-11-14 17:47:22 +00001489 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001490 // register the output connection slots for the layer, connections are made after all layers have been created
1491 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1492 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1493}
1494
Kevin May7d96b162021-02-03 17:38:41 +00001495void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
josh minorba424d22019-11-13 10:55:17 -06001496{
1497 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1498
1499 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1500 CHECK_VALID_SIZE(inputs.size(), 3);
1501 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1502 CHECK_VALID_SIZE(outputs.size(), 1);
1503
1504 SliceDescriptor desc;
1505
1506 // set begin tensor info for slice descriptor
1507 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1508 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1509
1510 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1511 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1512
1513 // set size tensor info for slice descriptor
1514 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1515 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1516
1517 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1518 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1519 desc = SliceDescriptor(begin, size);
1520
James Ward58dec6b2020-09-11 17:32:44 +01001521 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06001522
James Conroy05102392020-06-24 15:39:55 +01001523 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001524 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001525 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1526
1527 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
josh minorba424d22019-11-13 10:55:17 -06001528 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1529
1530 // register the input connection slots for the layer, connections are made after all layers have been created
1531 // only the tensors for the inputs are relevant, exclude the const tensors
1532 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1533 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1534
1535 // register the output connection slots for the layer, connections are made after all layers have been created
1536 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1537 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1538}
1539
Kevin May7d96b162021-02-03 17:38:41 +00001540void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001541{
1542 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1543 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1544 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1545
1546 SoftmaxDescriptor desc;
1547 desc.m_Beta = options->beta;
1548
1549 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1550 CHECK_VALID_SIZE(inputs.size(), 1);
1551 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1552 CHECK_VALID_SIZE(outputs.size(), 1);
1553
James Ward58dec6b2020-09-11 17:32:44 +01001554 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001555 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1556
Sadik Armagand109a4d2020-07-28 10:42:13 +01001557 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
telsoa01c577f2c2018-08-31 09:22:23 +01001558 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1559
1560 // register the input connection slots for the layer, connections are made after all layers have been created
1561 // only the tensors for the inputs are relevant, exclude the const tensors
1562 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1563 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1564
1565 // register the output connection slots for the layer, connections are made after all layers have been created
1566 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1567 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1568}
1569
Kevin May7d96b162021-02-03 17:38:41 +00001570void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001571{
1572 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1573
1574 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1575 CHECK_VALID_SIZE(inputs.size(), 3);
1576
1577 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1578 CHECK_VALID_SIZE(outputs.size(), 1);
1579
1580 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1581 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1582
1583 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1584 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1585
1586 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1587 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1588
1589 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1590 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1591
1592 size_t step = 2;
1593 std::vector<std::pair<unsigned int, unsigned int>> padList;
1594 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1595 {
1596 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1597 }
1598
1599 armnn::SpaceToBatchNdDescriptor desc;
1600 desc.m_BlockShape = blockShape;
1601 desc.m_PadList = padList;
1602 desc.m_DataLayout = armnn::DataLayout::NHWC;
1603
James Ward58dec6b2020-09-11 17:32:44 +01001604 auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001605
James Conroy05102392020-06-24 15:39:55 +01001606 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001607 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001608 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1609
1610 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1611 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001612 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1613
1614 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1615 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1616
1617 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1618 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1619}
1620
Teresa Charlin3ab85482021-06-08 16:59:29 +01001621armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
Kevin May7d96b162021-02-03 17:38:41 +00001622 const armnn::TensorInfo & inputTensorInfo)
telsoa01c577f2c2018-08-31 09:22:23 +01001623{
Teresa Charlin3ab85482021-06-08 16:59:29 +01001624 CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
telsoa01c577f2c2018-08-31 09:22:23 +01001625 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1626
1627 if (inputTensorInfo.GetNumDimensions() > 4)
1628 {
1629 std::stringstream ss;
1630 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1631 << " shape:" << inputTensorInfo.GetShape() << " "
1632 << CHECK_LOCATION().AsString();
1633 throw ParseException(ss.str());
1634 }
1635
1636 if (squeezeDims.empty())
1637 {
1638 squeezeDims.assign(dimensionSequence,
1639 dimensionSequence+inputTensorInfo.GetNumDimensions());
1640 }
1641
1642 std::vector<uint32_t> outputDims;
1643 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1644 {
1645 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1646 auto currentDimension = inputTensorInfo.GetShape()[i];
1647 if (skipSqueeze || currentDimension != 1)
1648 {
1649 outputDims.push_back(currentDimension);
1650 }
1651 }
1652
1653 if (outputDims.size() > 4)
1654 {
1655 std::stringstream ss;
1656 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1657 << " shape:" << inputTensorInfo.GetShape() << " "
1658 << CHECK_LOCATION().AsString();
1659 throw ParseException(ss.str());
1660 }
1661
1662 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1663 outputDims.data());
1664
1665 // we need to preserve the tensor type and the quantization data as well
1666 TensorInfo outTensorInfo = inputTensorInfo;
1667 outTensorInfo.SetShape(outShape);
1668
1669 return outTensorInfo;
1670}
1671
Keith Davis0176fd82021-06-01 17:36:32 +01001672void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
1673{
1674 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1675
1676 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1677 CHECK_VALID_SIZE(inputs.size(), 1);
1678 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1679 CHECK_VALID_SIZE(outputs.size(), 1);
1680
1681 auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
1682
1683 IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
1684 ARMNN_ASSERT(layer != nullptr);
1685
1686
1687 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
1688 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1689
1690 // Check if output tensor type is Signed32 or Signed64
1691 if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
1692 outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
1693 {
1694 throw ParseException(
1695 fmt::format(
1696 "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
1697 CHECK_LOCATION().AsString()));
1698 }
1699
1700 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1701 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1702
1703 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1704 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1705}
1706
Kevin May7d96b162021-02-03 17:38:41 +00001707void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01001708{
1709 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1710
1711 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1712 CHECK_VALID_SIZE(inputs.size(), 1);
1713
1714 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1715 CHECK_VALID_SIZE(outputs.size(), 1);
1716
1717 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1718 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01001719 auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001720
1721 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Teresa Charlin3ab85482021-06-08 16:59:29 +01001722
1723 std::vector<uint32_t> squeezeDim;
1724 // A single negative dim index is interpreted as a negative index in python
1725 // Meaning the index will be the shape size plus the negative index value
1726 if (options->squeeze_dims.size() == 1 && options->squeeze_dims[0] < 0)
1727 {
1728 int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
1729 squeezeDim.push_back(static_cast<uint32_t>(dim));
1730 }
1731 else
1732 {
1733 squeezeDim = AsUnsignedVector(options->squeeze_dims);
1734 }
1735
1736 armnn::TensorInfo outputTensorInfo = TfLiteParserImpl::OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);
1737
James Conroy05102392020-06-24 15:39:55 +01001738 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
telsoa01c577f2c2018-08-31 09:22:23 +01001739
1740 ReshapeDescriptor reshapeDesc;
1741 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1742
telsoa01c577f2c2018-08-31 09:22:23 +01001743 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001744 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001745 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1746
1747 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1748 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1749
1750 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1751 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1752}
1753
Kevin May7d96b162021-02-03 17:38:41 +00001754void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001755{
1756 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1757
1758 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1759 CHECK_VALID_SIZE(inputs.size(), 4);
1760
1761 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1762 CHECK_VALID_SIZE(outputs.size(), 1);
1763
1764 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1765 const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();
1766
1767 StridedSliceDescriptor desc;
1768 desc.m_BeginMask = options->begin_mask;
1769 desc.m_EllipsisMask = options->ellipsis_mask;
1770 desc.m_EndMask = options->end_mask;
1771 desc.m_NewAxisMask = options->new_axis_mask;
1772 desc.m_ShrinkAxisMask = options->shrink_axis_mask;
1773 desc.m_DataLayout = armnn::DataLayout::NHWC;
1774
1775 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1776 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1777
1778 std::vector<int> begin(beginTensorInfo.GetNumElements());
1779 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1780
1781 armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
1782 BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1783
1784 std::vector<int> end(endTensorInfo.GetNumElements());
1785 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1786
1787 armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
1788 BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);
1789
1790 std::vector<int> stride(strideTensorInfo.GetNumElements());
1791 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1792
1793 desc.m_Begin = begin;
1794 desc.m_End = end;
1795 desc.m_Stride = stride;
1796
James Ward58dec6b2020-09-11 17:32:44 +01001797 auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001798 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001799 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001800
Sadik Armagand109a4d2020-07-28 10:42:13 +01001801 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves451d95b2019-02-12 22:59:22 -02001802 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1803
1804 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1805 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1806
1807 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1808 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1809}
1810
Kevin May7d96b162021-02-03 17:38:41 +00001811void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001812{
1813 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1814
1815 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1816 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1817
1818 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1819 CHECK_VALID_SIZE(inputs.size(), 2);
1820
1821 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1822 CHECK_VALID_SIZE(outputs.size(), 1);
1823
1824 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1825 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1826
James Ward58dec6b2020-09-11 17:32:44 +01001827 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001828 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001829 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001830
Sadik Armagand109a4d2020-07-28 10:42:13 +01001831 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001832 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1833
1834 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001835 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001836
1837 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1838
1839 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1840 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1841}
1842
Kevin May7d96b162021-02-03 17:38:41 +00001843void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301844{
1845 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1846
1847 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1848 const auto * options = operatorPtr->builtin_options.AsDivOptions();
1849
1850 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1851 CHECK_VALID_SIZE(inputs.size(), 2);
1852
1853 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1854 CHECK_VALID_SIZE(outputs.size(), 1);
1855
1856 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1857 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1858
James Ward58dec6b2020-09-11 17:32:44 +01001859 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301860 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001861 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301862
Sadik Armagand109a4d2020-07-28 10:42:13 +01001863 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301864 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1865
1866 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001867 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301868 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1869
1870 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1871 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1872}
1873
Kevin May7d96b162021-02-03 17:38:41 +00001874void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001875{
1876 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1877
1878 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1879 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1880
1881 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1882 CHECK_VALID_SIZE(inputs.size(), 2);
1883
1884 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1885 CHECK_VALID_SIZE(outputs.size(), 1);
1886
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001887 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1888 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1889
James Ward58dec6b2020-09-11 17:32:44 +01001890 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001891 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001892 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001893
Sadik Armagand109a4d2020-07-28 10:42:13 +01001894 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001895 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1896
1897 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001898 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001899 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1900
1901 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1902 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1903}
1904
Kevin May7d96b162021-02-03 17:38:41 +00001905void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001906{
1907 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1908
1909 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1910 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1911
1912 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1913 CHECK_VALID_SIZE(inputs.size(), 2);
1914
1915 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1916 CHECK_VALID_SIZE(outputs.size(), 1);
1917
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001918 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1919 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1920
James Ward58dec6b2020-09-11 17:32:44 +01001921 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001922 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001923 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001924
Sadik Armagand109a4d2020-07-28 10:42:13 +01001925 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001926 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1927
1928 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001929 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001930 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1931
1932 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1933 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1934}
1935
Kevin May7d96b162021-02-03 17:38:41 +00001936void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001937{
1938 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1939
1940 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1941
1942 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1943 CHECK_VALID_SIZE(outputs.size(), 1);
1944
1945 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1946 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1947
1948 armnn::MeanDescriptor desc;
1949 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1950 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1951 desc.m_Axis = axis;
1952
1953 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001954 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001955
1956 desc.m_KeepDims =
1957 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1958 true : false;
1959
James Ward58dec6b2020-09-11 17:32:44 +01001960 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001961 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001962 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001963
1964 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1965
1966 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1967 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1968
1969 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1970 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1971}
1972
Kevin May7d96b162021-02-03 17:38:41 +00001973void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001974{
1975 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1976
Kevin May7d96b162021-02-03 17:38:41 +00001977 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001978
Kevin May7d96b162021-02-03 17:38:41 +00001979 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001980 CHECK_VALID_SIZE(outputs.size(), 1);
1981
Narumol Prangnawarat8719d222020-11-27 16:57:56 +00001982 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1983
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001984 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1985 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1986
1987 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1988 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1989
1990 size_t step = 2;
1991 armnn::PadDescriptor desc;
Narumol Prangnawarat8719d222020-11-27 16:57:56 +00001992 if (inputTensorInfo.IsQuantized())
1993 {
1994 desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
1995 }
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001996 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1997 {
1998 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1999 }
2000
James Ward58dec6b2020-09-11 17:32:44 +01002001 auto layerName = fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002002 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002003
2004 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
2005 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02002006 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2007
2008 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2009 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2010
2011 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2012 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2013}
2014
Narumol Prangnawaratbfaee6b2021-05-24 18:50:24 +01002015void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
2016{
2017 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2018
2019 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2020 CHECK_VALID_SIZE(inputs.size(), 2);
2021
2022 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2023 CHECK_VALID_SIZE(outputs.size(), 1);
2024
2025 auto layerName = fmt::format("Prelu:{}:{}", subgraphIndex, operatorIndex);
2026
2027 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2028 armnn::TensorInfo alphaTensorInfo = ToTensorInfo(inputs[1]);
2029 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
2030 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2031
2032 IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
2033 ARMNN_ASSERT(layer != nullptr);
2034 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2035
2036 if (IsConstTensor(inputs[1]))
2037 {
Narumol Prangnawaratbfaee6b2021-05-24 18:50:24 +01002038 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawaratbf99b5f2021-05-27 09:55:43 +01002039 armnn::IInputSlot* slot = &(layer->GetInputSlot(0));
2040 RegisterConsumerOfTensor(subgraphIndex, inputTensorIndexes[0], slot);
Narumol Prangnawaratbfaee6b2021-05-24 18:50:24 +01002041
2042 auto alphaTensorAndData = CreateConstTensorNonPermuted(inputs[1], alphaTensorInfo);
2043 std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
2044 IConnectableLayer* constLayer =
2045 m_Network->AddConstantLayer(alphaTensorAndData, constLayerName.c_str());
2046 ARMNN_ASSERT(constLayer != nullptr);
2047
2048 constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
2049 constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
2050 RegisterOutputSlots(subgraphIndex,
2051 VIRTUAL_OPERATOR_ID,
2052 constLayer,
2053 { inputTensorIndexes[1] });
2054 }
2055 else
2056 {
2057 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2058 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIndexes);
2059 }
2060
2061 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2062 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2063}
2064
Kevin May7d96b162021-02-03 17:38:41 +00002065void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan66dedc72019-12-10 16:32:07 +00002066{
2067 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2068
2069 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2070 CHECK_VALID_SIZE(inputs.size(), 1);
2071
2072 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2073 CHECK_VALID_SIZE(outputs.size(), 1);
2074
James Ward58dec6b2020-09-11 17:32:44 +01002075 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002076
2077 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002078 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002079
Sadik Armagand109a4d2020-07-28 10:42:13 +01002080 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan66dedc72019-12-10 16:32:07 +00002081 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2082
2083 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2084 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2085
2086 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2087 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2088}
Finn Williamsc42c3842019-01-22 14:18:11 +00002089
Kevin May7d96b162021-02-03 17:38:41 +00002090void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01002091{
Finn Williamsc42c3842019-01-22 14:18:11 +00002092 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
Sadik Armagan58f39192018-09-17 14:14:39 +01002093}
2094
Kevin May7d96b162021-02-03 17:38:41 +00002095void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan58f39192018-09-17 14:14:39 +01002096{
Finn Williamsc42c3842019-01-22 14:18:11 +00002097 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
2098}
Sadik Armagan58f39192018-09-17 14:14:39 +01002099
Kevin May7d96b162021-02-03 17:38:41 +00002100void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan12239e72020-05-27 11:06:17 +01002101{
Jan Eilers2f746b32020-07-28 14:00:06 +01002102 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
Sadik Armagan12239e72020-05-27 11:06:17 +01002103}
2104
Kevin May7d96b162021-02-03 17:38:41 +00002105void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
Finn Williamsc42c3842019-01-22 14:18:11 +00002106{
2107 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
2108}
2109
Kevin May7d96b162021-02-03 17:38:41 +00002110void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd99851762019-04-09 09:37:38 +01002111{
2112 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
2113}
2114
Kevin May7d96b162021-02-03 17:38:41 +00002115void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
Matthew Sloyan7515d072020-12-16 12:50:01 +00002116{
2117 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
2118}
2119
Kevin May7d96b162021-02-03 17:38:41 +00002120void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
Jan Eilers2f746b32020-07-28 14:00:06 +01002121{
2122 ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
2123}
Finn Williamsc42c3842019-01-22 14:18:11 +00002124
Kevin May7d96b162021-02-03 17:38:41 +00002125void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
Finn Williamsc42c3842019-01-22 14:18:11 +00002126{
2127 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01002128 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Jan Eilers8eb25602020-03-09 12:13:48 +00002129 IgnoreUnused(operatorPtr);
Sadik Armagan58f39192018-09-17 14:14:39 +01002130
2131 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2132 CHECK_VALID_SIZE(inputs.size(), 1);
2133
2134 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2135 CHECK_VALID_SIZE(outputs.size(), 1);
2136
James Ward58dec6b2020-09-11 17:32:44 +01002137 auto layerName = fmt::format("Activation:");
Sadik Armagan58f39192018-09-17 14:14:39 +01002138 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00002139 activationDesc.m_Function = activationType;
2140
2141 switch (activationType)
2142 {
2143 case ActivationFunction::ReLu:
2144 {
James Ward58dec6b2020-09-11 17:32:44 +01002145 layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00002146 break;
2147 }
2148 case ActivationFunction::BoundedReLu:
2149 {
James Ward58dec6b2020-09-11 17:32:44 +01002150 layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00002151 activationDesc.m_A = 6.0f;
2152 activationDesc.m_B = 0.0f;
2153 break;
2154 }
2155 case ActivationFunction::Sigmoid:
2156 {
James Ward58dec6b2020-09-11 17:32:44 +01002157 layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00002158 break;
2159 }
Nina Drozd99851762019-04-09 09:37:38 +01002160 case ActivationFunction::TanH:
2161 {
James Ward58dec6b2020-09-11 17:32:44 +01002162 layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd99851762019-04-09 09:37:38 +01002163 activationDesc.m_A = 1.0f;
2164 activationDesc.m_B = 1.0f;
2165 break;
2166 }
Sadik Armagan12239e72020-05-27 11:06:17 +01002167 case ActivationFunction::LeakyReLu:
2168 {
James Ward58dec6b2020-09-11 17:32:44 +01002169 layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan12239e72020-05-27 11:06:17 +01002170 const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
2171 activationDesc.m_A = options->alpha;
2172 break;
2173 }
Matthew Sloyan7515d072020-12-16 12:50:01 +00002174 case ActivationFunction::Elu:
2175 {
2176 layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
2177 activationDesc.m_A = 1.0f;
2178 break;
2179 }
Jan Eilers2f746b32020-07-28 14:00:06 +01002180 case ActivationFunction::HardSwish:
Matthew Sloyan7515d072020-12-16 12:50:01 +00002181 {
James Ward58dec6b2020-09-11 17:32:44 +01002182 layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
Jan Eilers2f746b32020-07-28 14:00:06 +01002183 break;
Matthew Sloyan7515d072020-12-16 12:50:01 +00002184 }
Finn Williamsc42c3842019-01-22 14:18:11 +00002185 default:
2186 {
2187 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002188 fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
2189 static_cast<int>(activationType), CHECK_LOCATION().AsString()));
Finn Williamsc42c3842019-01-22 14:18:11 +00002190 }
2191 }
2192
2193 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01002194
Sadik Armagand109a4d2020-07-28 10:42:13 +01002195 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan58f39192018-09-17 14:14:39 +01002196 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2197
2198 // register the input connection slots for the layer, connections are made after all layers have been created
2199 // only the tensors for the inputs are relevant, exclude the const tensors
2200 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2201 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2202
2203 // register the output connection slots for the layer, connections are made after all layers have been created
2204 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2205 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2206}
Kevin May7d96b162021-02-03 17:38:41 +00002207armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
2208 const std::vector<int32_t> & targetDimsIn)
Sadikb94967b2018-09-19 15:30:00 +01002209{
2210 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2211 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2212
2213 if (stretchDim != targetDimsIn.end())
2214 {
2215 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2216 {
2217 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002218 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01002219 }
2220
2221 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002222 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01002223 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2224
2225 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2226 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2227 }
2228
2229 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2230
2231 TensorInfo reshapeInfo = inputTensorInfo;
2232 reshapeInfo.SetShape(outputShape);
2233
2234 return reshapeInfo;
2235}
2236
Kevin May7d96b162021-02-03 17:38:41 +00002237void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
Sadikb94967b2018-09-19 15:30:00 +01002238{
2239 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2240
2241 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01002242
2243 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2244 CHECK_VALID_SIZE(outputs.size(), 1);
2245
2246 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2247 const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01002248 auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);
Sadikb94967b2018-09-19 15:30:00 +01002249
2250 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
kevmay0171972a82018-12-17 14:28:03 +00002251 armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
James Conroy05102392020-06-24 15:39:55 +01002252 CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");
Derek Lambertic9e52792020-03-11 11:42:26 +00002253
Jan Eilersbac9b352020-07-13 13:40:24 +01002254 // Extracting new shape for the output
2255 // There are two ways it can be passed
2256 // * First is to define the target shape in the operator built-in options
2257 // * Second is to pass it as a second input tensor
Derek Lambertic9e52792020-03-11 11:42:26 +00002258 std::vector<int32_t> targetShape;
Jan Eilersbac9b352020-07-13 13:40:24 +01002259 bool targetShapeFound = false;
2260 // Check if built-in options were given
2261 if (options != nullptr)
Derek Lambertic9e52792020-03-11 11:42:26 +00002262 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002263 // make sure the parameter is given
2264 if (options->new_shape.empty() == false)
Derek Lambertic9e52792020-03-11 11:42:26 +00002265 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002266 targetShape = options->new_shape;
2267 targetShapeFound = true;
Derek Lambertif4a953f2020-03-17 14:25:57 +00002268 }
Derek Lambertic9e52792020-03-11 11:42:26 +00002269 }
Jan Eilersbac9b352020-07-13 13:40:24 +01002270
2271 // If there is no built-in option given or if the built-in new_shape parameter was empty
2272 if (!targetShapeFound)
Derek Lambertic9e52792020-03-11 11:42:26 +00002273 {
Jan Eilersbac9b352020-07-13 13:40:24 +01002274 // Check for a second input tensor
2275 if (inputs.size() > 1 && inputs[1] != nullptr)
2276 {
2277 if (inputs[1]->is_variable)
2278 {
2279 ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
2280 }
2281
2282 if (inputs[1]->shape.size() != 1)
2283 {
2284 ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
2285 }
2286
2287 if (inputs[1]->type != tflite::TensorType_INT32)
2288 {
2289 ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
2290 }
2291
2292 // Extract target shape from input
2293 auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2294 auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
Sadik Armagan19a1c032021-01-20 12:17:00 +00002295 if (!values)
2296 {
2297 ARMNN_THROW_PARSE_EXCEPTION("Reshape operator target shape input buffer data is null");
2298 }
Jan Eilersbac9b352020-07-13 13:40:24 +01002299 for (int i=0; i < inputs[1]->shape[0]; ++i)
2300 {
2301 targetShape.push_back(values[i]);
2302 }
2303 }
2304 else
Derek Lambertic9e52792020-03-11 11:42:26 +00002305 {
2306 ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
2307 "At least one method required");
2308 }
Derek Lambertic9e52792020-03-11 11:42:26 +00002309 }
2310
kevmay0171972a82018-12-17 14:28:03 +00002311 armnn::TensorInfo reshapeOutputTensorInfo =
Kevin May7d96b162021-02-03 17:38:41 +00002312 TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);
Sadikb94967b2018-09-19 15:30:00 +01002313
kevmay0171972a82018-12-17 14:28:03 +00002314 // Check for valid input size and that reshape parameters equal output shape
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00002315 const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
2316 if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
kevmay0171972a82018-12-17 14:28:03 +00002317 {
2318 std::stringstream ss;
2319 ss << "New shape defined in reshape parameters "
Aron Virginas-Tar70672f62019-01-23 14:00:00 +00002320 << reshapeOutputTensorShape
kevmay0171972a82018-12-17 14:28:03 +00002321 << " does not equal output shape "
2322 << actualOutputTensorInfo.GetShape()
2323 << ": "
2324 << CHECK_LOCATION().AsString();
2325 throw ParseException(ss.str());
2326 }
2327
Sadikb94967b2018-09-19 15:30:00 +01002328 ReshapeDescriptor reshapeDesc;
kevmay0171972a82018-12-17 14:28:03 +00002329 reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();
Sadikb94967b2018-09-19 15:30:00 +01002330
Sadikb94967b2018-09-19 15:30:00 +01002331 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002332 ARMNN_ASSERT(layer != nullptr);
kevmay0171972a82018-12-17 14:28:03 +00002333 layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
Sadikb94967b2018-09-19 15:30:00 +01002334
2335 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2336 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2337
2338 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2339 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2340}
2341
Kevin May7d96b162021-02-03 17:38:41 +00002342void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002343{
Sadik Armagana3b31f02019-12-05 09:08:53 +00002344 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
2345}
2346
Kevin May7d96b162021-02-03 17:38:41 +00002347void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagana3b31f02019-12-05 09:08:53 +00002348{
2349 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
2350}
2351
Kevin May7d96b162021-02-03 17:38:41 +00002352void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
Sadik Armagana3b31f02019-12-05 09:08:53 +00002353{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002354 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2355
2356 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2357 CHECK_VALID_SIZE(inputs.size(), 2);
2358
2359 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2360 CHECK_VALID_SIZE(outputs.size(), 1);
2361
2362 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
2363
2364 // Data for the parsed tensor args (size) must be stored locally.
2365 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2366
2367 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2368 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2369
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002370 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002371 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002372 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002373 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2374 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002375
James Ward58dec6b2020-09-11 17:32:44 +01002376 auto layerName = fmt::format("Resize:");
Sadik Armagana3b31f02019-12-05 09:08:53 +00002377
2378 switch (resizeMethod)
2379 {
2380 case ResizeMethod::Bilinear:
2381 {
James Ward58dec6b2020-09-11 17:32:44 +01002382 layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002383
2384 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2385 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2386
David Monahan4a0c9b92020-05-30 09:48:39 +01002387 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002388 break;
2389 }
2390 case ResizeMethod::NearestNeighbor:
2391 {
James Ward58dec6b2020-09-11 17:32:44 +01002392 layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagana3b31f02019-12-05 09:08:53 +00002393 break;
2394 }
2395 default:
2396 {
2397 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002398 fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
2399 static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
Sadik Armagana3b31f02019-12-05 09:08:53 +00002400 }
2401 }
2402
James Conroy05102392020-06-24 15:39:55 +01002403 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002404 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002405 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2406
2407 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
2408 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002409 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2410
2411 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2412 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2413
2414 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2415 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2416}
2417
Kevin May7d96b162021-02-03 17:38:41 +00002418void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan479045b2018-10-01 11:51:37 +01002419{
2420 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2421
2422 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2423 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
2424
2425 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2426
2427 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2428 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2429 CHECK_VALID_SIZE(outputs.size(), 1);
2430
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002431 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
2432 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01002433
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002434 const unsigned int concatDimInput = static_cast<unsigned int>(
2435 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01002436
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002437 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2438 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01002439
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002440 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01002441
2442 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2443 {
2444 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
2445
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002446 // This set up concatDescriptor view origin
2447 armnnUtils::ProcessConcatInputTensorInfo(
2448 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01002449 }
2450
James Ward58dec6b2020-09-11 17:32:44 +01002451 auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002452 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002453
Jim Flynn906f9462019-05-10 13:55:21 +01002454 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002455 ARMNN_ASSERT(layer != nullptr);
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002456 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01002457
James Conroy05102392020-06-24 15:39:55 +01002458 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002459 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01002460
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002461 // add fused activation layer
2462 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01002463
Sadik Armagan479045b2018-10-01 11:51:37 +01002464 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2465 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2466}
2467
Kevin May7d96b162021-02-03 17:38:41 +00002468void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002469{
2470 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2471
2472 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2473 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2474
2475 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2476
2477 FullyConnectedDescriptor desc;
2478 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002479 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002480
2481 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2482 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2483 CHECK_VALID_SIZE(outputs.size(), 1);
2484
2485 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2486
2487 // Fully Connected Layer accepts two dimensional weights input
2488 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2489 if (weightsDimension != 2)
2490 {
2491 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002492 fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
2493 "Node {}",
2494 weightsDimension,
2495 CHECK_LOCATION().AsString()));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002496 }
2497
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002498 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01002499 auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002500
Finn Williamsd4fa5452021-03-01 12:31:41 +00002501 Optional<ConstTensor> filterOptionalConstTensor;
2502
2503 desc.m_ConstantWeights = IsConstTensor(inputs[1]);
2504
Finn Williamsd4fa5452021-03-01 12:31:41 +00002505 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2506 std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0]};
2507 if (desc.m_ConstantWeights)
2508 {
2509 filterOptionalConstTensor = Optional<ConstTensor>(CreateConstTensorNonPermuted(inputs[1], filterTensorInfo));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002510 }
2511 else
2512 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00002513 // Non const weights will need to be registered as inputs
2514 tensorIndexesToRegister.emplace_back(inputTensorIndexes[1]);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002515 }
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002516
Finn Williamsd4fa5452021-03-01 12:31:41 +00002517 Optional<ConstTensor> biasOptionalConstTensor;
2518 if (inputs.size() == 3)
2519 {
2520 desc.m_BiasEnabled = true;
2521 if (desc.m_ConstantWeights)
2522 {
2523 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
2524 biasOptionalConstTensor = Optional<ConstTensor>(CreateConstTensorNonPermuted(inputs[2], biasTensorInfo));
2525 }
2526 else
2527 {
2528 // Non const biases will need to be registered as inputs
2529 tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
2530 }
2531 }
2532
2533 layer = m_Network->AddFullyConnectedLayer(desc,
2534 filterOptionalConstTensor,
2535 biasOptionalConstTensor,
2536 layerName.c_str());
2537
2538 ARMNN_ASSERT(layer != nullptr);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002539 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2540
Finn Williamsd4fa5452021-03-01 12:31:41 +00002541 unsigned int startingSlotIndex = 0;
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002542 if (inputTensorInfo.GetNumDimensions() > 2)
2543 {
2544 // Add reshape to flatten to 2D [batch_size, input_size],
2545 // where "input_size" corresponds to the number of inputs to the layer,
2546 // matching the second dimension of weights,
2547 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2548 std::vector<unsigned int> reshapedDimensions(2);
2549 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2550 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2551
2552 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2553 {
2554 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002555 fmt::format("Failed to deduce input tensor shape from filter size {} {}",
2556 reshapedDimensions[1],
2557 CHECK_LOCATION().AsString()));
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002558 }
2559
2560 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2561 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2562
James Ward58dec6b2020-09-11 17:32:44 +01002563 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Finn Williamsd4fa5452021-03-01 12:31:41 +00002564 armnn::ReshapeDescriptor reshapeDescriptor;
2565 reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
2566 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor, layerName.c_str());
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002567
2568 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2569 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2570
2571 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
Finn Williamsd4fa5452021-03-01 12:31:41 +00002572 // Fc layer connects to the reshape layer, so we skip the first input slot when registering fc's input slots
2573 tensorIndexesToRegister.erase(tensorIndexesToRegister.begin());
2574 startingSlotIndex = 1;
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002575 }
Finn Williamsd4fa5452021-03-01 12:31:41 +00002576
2577 RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister, startingSlotIndex);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002578
Sadik Armagand109a4d2020-07-28 10:42:13 +01002579 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002580 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2581
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002582 // we need to add the activation layer and fortunately we don't need to care about the data layout
2583 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2584 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002585
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002586 // register the output connection slots for the layer, connections are made after all layers have been created
2587 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2588 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2589}
2590
/// Parses the TFLite_Detection_PostProcess custom operator.
/// Reads the operator's descriptor fields from its flexbuffer-encoded custom
/// options, adds an ArmNN DetectionPostProcess layer, and overrides the four
/// output shapes (the model itself does not specify them).
void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    // The op always produces: boxes, classes, scores, num_detections.
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two keys are optional in the flexbuffer map; when absent the
    // descriptor keeps its default values.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    // Validate the IoU threshold is in (0, 1].
    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Input 2 is the constant anchors tensor, passed to the layer directly.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensorNonPermuted(inputs[2], anchorTensorInfo);

    auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
                                                                       layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    // Order matters: boxes [1, N, 4], classes [1, N], scores [1, N], count [1].
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
    m_OverridenOutputShapes.push_back({ 1 });

    // Apply the overridden shapes to each output slot.
    // NOTE(review): indexes into m_OverridenOutputShapes from the front —
    // assumes this is the only operator pushing overrides; confirm if multiple
    // DetectionPostProcess ops per model are expected.
    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
2667
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002668/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
Kevin May7d96b162021-02-03 17:38:41 +00002669void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002670{
2671 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2672
2673 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2674 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2675 CHECK_VALID_SIZE(outputs.size(), 1);
2676
2677 if (inputs.size() < 1)
2678 {
2679 throw ParseException("Pack must have at least one input.");
2680 }
2681
2682 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2683 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2684
2685 StackDescriptor desc;
2686 desc.m_Axis = static_cast<uint32_t>(options->axis);
2687 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2688
2689 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2690 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2691 desc.m_InputShape = inputTensorInfo.GetShape();
2692
James Ward58dec6b2020-09-11 17:32:44 +01002693 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002694 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2695
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002696 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002697
Sadik Armagand109a4d2020-07-28 10:42:13 +01002698 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002699 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2700
2701 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2702 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2703
2704 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2705 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2706}
2707
Kevin May7d96b162021-02-03 17:38:41 +00002708void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
Nina Drozd200e3802019-04-15 09:47:39 +01002709{
2710 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2711
2712 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2713 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2714
2715 // This unpackAxis indicates the axis to unpack
2716 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2717
2718 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2719 CHECK_VALID_SIZE(inputs.size(), 1);
2720
2721 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002722
2723 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2724 {
2725 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002726 fmt::format("The unpack axis: {} cannot be greater than or equal to "
2727 "the number of input dimension {} {}",
2728 unpackAxis,
2729 inputTensorInfo.GetNumDimensions(),
2730 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002731 }
2732
Nina Drozd200e3802019-04-15 09:47:39 +01002733 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2734 // If num is not defined, automatically infer from the length of the dimension axis.
2735 if(unpackNum == 0)
2736 {
2737 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2738 }
2739
2740 // If unpack number cannot be inferred and is still zero, throw ParseException.
2741 if(unpackNum == 0)
2742 {
2743 throw ParseException("Number to unpack must greater than zero.");
2744 }
2745
2746 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2747 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2748
2749 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2750 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2751
2752 // Add current input shape to unpackDimSizes
2753 for (unsigned int i = 0; i < inputDimSize; ++i)
2754 {
2755 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2756 }
2757
2758 if (unpackDimSizes[unpackAxis] != unpackNum)
2759 {
2760 throw ParseException("Number to unpack must be the same as length of the dimension to "
2761 "unpack along.");
2762 }
2763
2764 unpackDimSizes[unpackAxis] /= unpackNum;
2765
2766 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2767 for (unsigned int j = 0; j < unpackNum; ++j)
2768 {
2769 // Set the size of the views.
2770 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2771 {
2772 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2773 }
2774 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2775 }
2776
James Ward58dec6b2020-09-11 17:32:44 +01002777 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01002778 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002779 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01002780
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002781 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2782 unpackDimSizes.data());
2783
Nina Drozd200e3802019-04-15 09:47:39 +01002784 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2785 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2786
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002787 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2788 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2789 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002790 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01002791 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002792 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002793 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002794 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2795
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002796 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2797 outputTensorInfo.GetDataType(),
2798 outputTensorInfo.GetQuantizationScale(),
2799 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002800 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2801
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002802 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002803
2804 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2805 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2806 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2807 }
Nina Drozd200e3802019-04-15 09:47:39 +01002808}
2809
/// Parses a TfLite SPLIT operator into an ArmNN Splitter layer.
/// TfLite SPLIT input ordering: input 0 is the scalar split axis tensor,
/// input 1 is the tensor to split; num_splits comes from the builtin options.
void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSplitOptions();

    const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);

    // If number of splits cannot be inferred and is zero, throw ParseException.
    if(numSplits == 0)
    {
        throw ParseException("Number to splits must greater than zero.");
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), numSplits);

    // inputs[1] is the data tensor; inputs[0] is the (single-element) axis tensor.
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);

    // The axis must be a constant tensor backed by a buffer in the model.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData[0];

    // Reject an axis outside [-rank, rank).
    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    // Convert a possibly negative axis into an unsigned dimension index.
    const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        throw ParseException(
            fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
                        inputTensorInfo.GetNumDimensions(),
                        MaxNumOfTensorDimensions,
                        CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    // SPLIT requires equal-sized pieces, so the split dimension must be divisible.
    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    splitterDimSizes[splitDim] /= numSplits;

    // One view per split: full extent in every dimension, 1/numSplits of the
    // split dimension, offset along the split dimension per view.
    SplitterDescriptor splitDesc(numSplits, inputDimSize);
    for (unsigned int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

    auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Only the data tensor (input 1) is registered; the axis tensor is consumed here.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
        layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2911
Derek Lambertif0176992020-04-28 13:37:49 +01002912unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
2913{
2914 int numDims = armnn::numeric_cast<int>(numDimsIn);
2915 int v = idx < 0 ? numDims + idx : idx;
2916 ARMNN_ASSERT(v >= 0);
2917 ARMNN_ASSERT(v < numDims);
2918
2919 return static_cast<unsigned int>(v);
2920}
2921
Kevin May7d96b162021-02-03 17:38:41 +00002922void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
Derek Lambertif0176992020-04-28 13:37:49 +01002923{
2924 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2925
2926 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Ryan OShea86704732020-05-26 11:41:04 +01002927 const auto * options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01002928
2929 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2930 CHECK_VALID_SIZE(inputs.size(), 3);
2931
2932 auto& inputTensor = inputs[0];
2933 auto& splitsTensor = inputs[1];
2934 auto& axisTensor = inputs[2];
2935
2936 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
2937 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
2938 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
2939 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
2940
2941 // Inputs
2942 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2943 if (inputDimSize > MaxNumOfTensorDimensions)
2944 {
2945 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002946 fmt::format("The number of dimensions: {} for input tensors of the "
2947 "SplitV op cannot be greater than {} {}",
2948 inputTensorInfo.GetNumDimensions(),
2949 MaxNumOfTensorDimensions,
2950 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01002951 }
2952
2953 // Get split axis
2954 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
Matthew Sloyaned7fce42021-04-15 20:46:24 +01002955 if (axisBufferPtr == nullptr)
2956 {
2957 throw ParseException(
2958 fmt::format("Operation has invalid inputs. Failed to read axis. {}",
2959 CHECK_LOCATION().AsString()));
2960 }
2961
Derek Lambertif0176992020-04-28 13:37:49 +01002962 std::vector<int> axisData(axisTensorInfo.GetNumElements());
2963 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
Matthew Sloyaned7fce42021-04-15 20:46:24 +01002964 int32_t axis = axisData[0];
2965
2966 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
2967 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
2968 {
2969 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
2970 // E.g. Rank 4 tensor can have axis in range [-4, 3)
2971 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
2972 throw ParseException(
2973 fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
2974 axis,
2975 CHECK_LOCATION().AsString()));
2976 }
2977 const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
Derek Lambertif0176992020-04-28 13:37:49 +01002978
Derek Lambertif0176992020-04-28 13:37:49 +01002979 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01002980 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01002981 unsigned int numSplits{0};
2982
2983 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01002984 {
2985 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01002986 }
2987 else
2988 {
Ryan OShea86704732020-05-26 11:41:04 +01002989 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01002990 }
2991
2992 if (numSplits <=0)
2993 {
2994 throw ParseException("SplitV has invalid number of splits");
2995 }
2996
Jan Eilersc0761e92020-06-29 16:48:44 +01002997 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01002998 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01002999 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01003000
Jan Eilersc0761e92020-06-29 16:48:44 +01003001 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01003002 int numInferred{0};
3003 unsigned int inferIdx{0};
3004 int splitSum{0};
3005 for (auto split : splitsData)
3006 {
3007 if (split < 0)
3008 {
3009 numInferred++;
3010 inferIdx = idx;
3011 }
3012 else
3013 {
3014 splitSum += split;
3015 }
3016 idx++;
3017 }
3018 // Check for inferred Axis
3019 if (numInferred == 0)
3020 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003021 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01003022 {
3023 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
3024 }
3025 }
3026 else if (numInferred == 1)
3027 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003028 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01003029 }
3030 else
3031 {
3032 throw ParseException("Cannot infer split size for more than one split");
3033 }
3034
Derek Lambertif0176992020-04-28 13:37:49 +01003035 //Ouput size validation
3036 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3037 CHECK_VALID_SIZE(outputs.size(), numSplits);
3038
3039 // Setup Armnn descriptor
3040 SplitterDescriptor splitDesc(numSplits, inputDimSize);
3041 unsigned int accumSplit = 0;
3042 for (unsigned int j = 0; j < numSplits; ++j)
3043 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003044 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01003045
3046 // Set the size of the views.
3047 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
3048 {
3049 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
3050 if (dimIdx == splitDim)
3051 {
3052 dimSize = splitSize;
3053 }
3054 splitDesc.SetViewSize(j, dimIdx, dimSize);
3055 }
3056
3057 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
3058 accumSplit += splitSize;
3059 }
3060
James Ward58dec6b2020-09-11 17:32:44 +01003061 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01003062 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01003063 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01003064
3065 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3066 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3067
3068 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
3069 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01003070 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01003071 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
3072 }
3073
3074 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3075 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3076}
3077
// Parses the TfLite ARG_MIN operator by delegating to the shared
// ArgMin/ArgMax handler with the Min function selected.
void TfLiteParserImpl::ParseArgMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Min);
}
3082
// Parses the TfLite ARG_MAX operator by delegating to the shared
// ArgMin/ArgMax handler with the Max function selected.
void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
{
    ParseArgMinMax(subgraphIndex, operatorIndex, armnn::ArgMinMaxFunction::Max);
}
3087
// Shared handler for the TfLite ARG_MIN and ARG_MAX operators.
// Inputs: [0] the data tensor, [1] a single-element axis tensor that must be
// backed by a constant buffer in the model. Output: the index tensor, which
// must be Signed32 or Signed64. Adds an ArgMinMax layer to the network and
// wires up its input/output slots.
void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex, ArgMinMaxFunction argMinMaxFunction)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
    // The axis tensor is expected to be a scalar (exactly one element).
    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);

    // Check if output tensor type is Signed32 or Signed64
    if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
        outputTensorInfo.GetDataType() != armnn::DataType::Signed64)
    {
        throw ParseException(
            fmt::format(
                "Output tensor data type is not supported. (Supported types: Signed32 & Signed64) {}",
                CHECK_LOCATION().AsString()));
    }

    // Get const axis value from model and set it to descriptor.
    // A null buffer means the axis is not a compile-time constant, which this
    // parser cannot handle.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    if (axisBufferPtr == nullptr)
    {
        throw ParseException(
            fmt::format("Operation has invalid inputs. Failed to read axis. {}",
                        CHECK_LOCATION().AsString()));
    }

    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    int32_t axis = axisData.front();

    // Validate that the (possibly negative) axis lies within the input's rank.
    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        throw ParseException(
            fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    ArgMinMaxDescriptor desc;
    desc.m_Axis = axis;
    desc.m_Function = argMinMaxFunction;

    // Register a ArgMin/ArgMax layer.
    auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
    auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
    IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register input tensor to the layer.
    // Only the data tensor is connected; the axis was folded into the descriptor.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Register output tensor to the layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
3156
// Parses the TfLite GATHER operator.
// Inputs: [0] the params tensor, [1] the indices tensor. The gather axis comes
// from the operator's builtin options. Validates the axis range and that the
// output rank equals inputRank + indicesRank - 1, then adds a Gather layer.
void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo indicesTensorInfo = ToTensorInfo(inputs[1]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);

    armnn::GatherDescriptor gatherDescriptor;

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsGatherOptions();
    auto axis = options->axis;

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
    auto outputDimensions = outputTensorInfo.GetNumDimensions();
    // Negative axes count from the back, so the valid range is [-rank, rank).
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        throw ParseException(
            fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
                        axis,
                        inputDimensions, inputDimensions,
                        CHECK_LOCATION().AsString()));
    }
    // Gather output rank must be inputRank + indicesRank - 1.
    if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
    {
        throw ParseException(
            fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
                        outputDimensions,
                        inputDimensions, indicesDimensions,
                        CHECK_LOCATION().AsString()));
    }

    gatherDescriptor.m_Axis = axis;

    auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Both params and indices are runtime inputs to the layer.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
3209
Kevin May7d96b162021-02-03 17:38:41 +00003210void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
Sadik Armagan26868492021-01-22 14:25:31 +00003211{
3212 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3213
Kevin May7d96b162021-02-03 17:38:41 +00003214 TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003215 CHECK_VALID_SIZE(inputs.size(), 1);
Kevin May7d96b162021-02-03 17:38:41 +00003216 TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan26868492021-01-22 14:25:31 +00003217 CHECK_VALID_SIZE(outputs.size(), 1);
3218
3219 armnn::DepthToSpaceDescriptor descriptor;
3220
3221 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3222 const auto * options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
3223 auto blockSize = options->block_size;
3224 if (blockSize < 2)
3225 {
3226 throw ParseException(
3227 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
3228 blockSize,
3229 CHECK_LOCATION().AsString()));
3230 }
3231 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
3232
3233 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
3234 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
3235 ARMNN_ASSERT(layer != nullptr);
3236 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3237 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3238
3239 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3240 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3241
3242 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3243 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
3244}
3245
// Parses the TfLite SUM operator via the shared reduce handler.
void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Sum);
}
3250
// Parses the TfLite REDUCE_MAX operator via the shared reduce handler.
void TfLiteParserImpl::ParseReduceMax(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Max);
}
3255
// Parses the TfLite REDUCE_MIN operator via the shared reduce handler.
void TfLiteParserImpl::ParseReduceMin(size_t subgraphIndex, size_t operatorIndex)
{
    ParseReduce(subgraphIndex, operatorIndex, armnn::ReduceOperation::Min);
}
3260
3261void TfLiteParserImpl::ParseReduce(size_t subgraphIndex, size_t operatorIndex, ReduceOperation reduceOperation)
3262{
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003263 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3264
3265 const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
3266 const auto *options = operatorPtr->builtin_options.AsReducerOptions();
3267
3268 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3269 CHECK_VALID_SIZE(inputs.size(), 2);
3270
3271 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3272 CHECK_VALID_SIZE(outputs.size(), 1);
3273
Sadik Armagana2747482021-02-09 10:28:54 +00003274 auto layerName = fmt::format("Reduce:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003275
3276 armnn::TensorInfo inputTensorInfo0 = ToTensorInfo(inputs[0]);
3277 armnn::TensorInfo inputTensorInfo1 = ToTensorInfo(inputs[1]);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003278
3279 ReduceDescriptor desc;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003280 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
3281 // Get const axis value from model and set it to descriptor.
3282 if (axisBufferPtr != nullptr)
3283 {
Sadik Armagan49bdb792021-02-11 13:57:07 +00003284 std::vector<int32_t> axisData(inputTensorInfo1.GetNumElements());
3285 ::memcpy(axisData.data(), axisBufferPtr->data.data(), inputTensorInfo1.GetNumBytes());
3286
3287 // Convert the axis to unsigned int and remove duplicates.
3288 auto rank = static_cast<int32_t>(inputTensorInfo0.GetNumDimensions());
3289 std::set<unsigned int> uniqueAxis;
3290 std::transform(axisData.begin(),
3291 axisData.end(),
3292 std::inserter(uniqueAxis, uniqueAxis.begin()),
3293 [rank](int i)->unsigned int{
3294 return static_cast<uint32_t>(((i + rank) % rank)); });
3295 desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003296 }
Sadik Armagana2747482021-02-09 10:28:54 +00003297 else
3298 {
3299 for (uint32_t i = 0; i < inputTensorInfo0.GetNumDimensions(); ++i)
3300 {
3301 desc.m_vAxis.push_back(i);
3302 }
3303 }
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003304
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003305 desc.m_KeepDims = options->keep_dims;
Sadik Armagana2747482021-02-09 10:28:54 +00003306 desc.m_ReduceOperation = reduceOperation;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003307
3308 // Register a new layer object, Sum.
3309 IConnectableLayer *layer = m_Network->AddReduceLayer(desc, layerName.c_str());
3310
3311 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
3312 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3313
3314 // Register input tensor to the layer.
3315 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3316 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3317
3318 // Register output tensor to the layer.
3319 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3320 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3321}
3322
// Parses the TfLite ABS operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
}
3327
// Parses the TfLite EXP operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
}
3332
// Parses the TfLite LOGICAL_NOT operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
}
3337
// Parses the TfLite NEG operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
}
3342
// Parses the TfLite RSQRT operator via the shared elementwise-unary handler.
void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
{
    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
}
3347
3348void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
3349{
3350 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
3351
3352 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
3353 CHECK_VALID_SIZE(inputs.size(), 1);
3354
3355 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
3356 CHECK_VALID_SIZE(outputs.size(), 1);
3357
3358 std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
3359 std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
3360
3361 ElementwiseUnaryDescriptor desc;
3362 desc.m_Operation = unaryOperation;
3363 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
3364 ARMNN_ASSERT(layer != nullptr);
3365
3366 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
3367 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
3368
3369 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
3370 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
3371
3372 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
3373 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
3374}
3375
Kevin May7d96b162021-02-03 17:38:41 +00003376armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
3377 unsigned int outputSlot,
3378 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01003379{
3380 ActivationDescriptor activationDesc;
3381 std::string layerName = prevLayer->GetName();
3382
3383 switch(activationType)
3384 {
3385 case tflite::ActivationFunctionType_NONE:
3386 {
3387 // this is a no-op: return previous layer
3388 return prevLayer;
3389 }
3390 case tflite::ActivationFunctionType_RELU:
3391 {
3392 activationDesc.m_Function = ActivationFunction::ReLu;
3393 layerName += ":RELU";
3394 break;
3395 }
3396 case tflite::ActivationFunctionType_RELU6:
3397 {
3398 activationDesc.m_Function = ActivationFunction::BoundedReLu;
3399 activationDesc.m_A = 6.0f;
3400 activationDesc.m_B = 0.0f;
3401 layerName += ":RELU6";
3402 break;
3403 }
3404 case tflite::ActivationFunctionType_TANH:
3405 {
3406 activationDesc.m_Function = ActivationFunction::TanH;
3407 activationDesc.m_A = 1.0f;
3408 activationDesc.m_B = 1.0f;
3409 layerName += ":TANH";
3410 break;
3411 }
3412
3413 // I only put these here as a reminder what others we could support
3414 case tflite::ActivationFunctionType_RELU_N1_TO_1:
3415 case tflite::ActivationFunctionType_SIGN_BIT:
3416 default:
3417 {
3418 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003419 fmt::format("TfLite parser doesn't suppport fused activation: "
3420 "{}/{} {} ",
3421 activationType,
3422 tflite::EnumNameActivationFunctionType(activationType),
3423 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003424
3425 }
3426 }
3427
3428 IConnectableLayer* activationLayer =
3429 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
3430
3431 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
3432 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
3433 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
3434 return activationLayer;
3435}
3436
Kevin May7d96b162021-02-03 17:38:41 +00003437TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char * fileName)
telsoa01c577f2c2018-08-31 09:22:23 +01003438{
3439 if (fileName == nullptr)
3440 {
James Ward58dec6b2020-09-11 17:32:44 +01003441 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003442 CHECK_LOCATION().AsString()));
3443 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01003444 std::error_code errorCode;
3445 fs::path pathToFile(fileName);
3446 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01003447 {
James Ward58dec6b2020-09-11 17:32:44 +01003448 //fmt::format() could not be used here (format error)
3449 std::stringstream msg;
3450 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
3451 << " " << CHECK_LOCATION().AsString();
3452
3453 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01003454 }
3455 std::ifstream file(fileName, std::ios::binary);
3456 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3457 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3458 fileContent.size());
3459}
3460
Kevin May7d96b162021-02-03 17:38:41 +00003461TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
telsoa01c577f2c2018-08-31 09:22:23 +01003462{
3463 if (binaryContent == nullptr)
3464 {
James Ward58dec6b2020-09-11 17:32:44 +01003465 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003466 CHECK_LOCATION().AsString()));
3467 }
3468 flatbuffers::Verifier verifier(binaryContent, len);
3469 if (verifier.VerifyBuffer<tflite::Model>() == false)
3470 {
3471 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003472 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
3473 "flatbuffers format. size:{} {}",
3474 len,
3475 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003476 }
3477 return tflite::UnPackModel(binaryContent);
3478}
3479
Kevin May7d96b162021-02-03 17:38:41 +00003480TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr & model,
3481 size_t subgraphIndex,
3482 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003483{
3484 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3485
Derek Lambertiff05cc52019-04-26 13:05:17 +01003486 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3487 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003488
3489 size_t inputCount = operatorPtr->inputs.size();
mathad01c21025d2021-04-26 10:09:37 +01003490 TensorRawPtrVector result;
telsoa01c577f2c2018-08-31 09:22:23 +01003491 for (size_t i=0; i<inputCount; ++i)
3492 {
mathad01c21025d2021-04-26 10:09:37 +01003493 // If the input location is -1 then assume input is turned off.
3494 if (operatorPtr->inputs[i] == -1)
3495 {
3496 continue;
3497 }
3498 else
3499 {
3500 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
3501 result.push_back(subgraphPtr->tensors[inputId].get());
3502 }
telsoa01c577f2c2018-08-31 09:22:23 +01003503 }
3504 return result;
3505}
3506
Kevin May7d96b162021-02-03 17:38:41 +00003507TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr & model,
3508 size_t subgraphIndex,
3509 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003510{
3511 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3512
Derek Lambertiff05cc52019-04-26 13:05:17 +01003513 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3514 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003515
3516 size_t outputCount = operatorPtr->outputs.size();
3517 TensorRawPtrVector result(outputCount);
3518 for (size_t i=0; i<outputCount; ++i)
3519 {
3520 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
3521 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003522 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003523 }
3524 return result;
3525}
3526
Kevin May7d96b162021-02-03 17:38:41 +00003527TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr & model,
3528 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003529{
3530 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003531 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003532
Derek Lambertiff05cc52019-04-26 13:05:17 +01003533 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003534 TensorIdRawPtrVector result(inputCount);
3535 for (size_t i=0; i<inputCount; ++i)
3536 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003537 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01003538 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003539 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003540 }
3541 return result;
3542}
3543
Kevin May7d96b162021-02-03 17:38:41 +00003544TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr & model,
3545 size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003546{
3547 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003548 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003549
Derek Lambertiff05cc52019-04-26 13:05:17 +01003550 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003551 TensorIdRawPtrVector result(outputCount);
3552 for (size_t i=0; i<outputCount; ++i)
3553 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003554 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3555 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003556 }
3557 return result;
3558}
3559
Kevin May7d96b162021-02-03 17:38:41 +00003560std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
3561 size_t subgraphIndex,
3562 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003563{
3564 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003565 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3566 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003567 return operatorPtr->inputs;
3568}
3569
Kevin May7d96b162021-02-03 17:38:41 +00003570std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
3571 size_t subgraphIndex,
3572 size_t operatorIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003573{
3574 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003575 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3576 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003577 return operatorPtr->outputs;
3578}
3579
Kevin May7d96b162021-02-03 17:38:41 +00003580void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
3581 size_t operatorIndex,
3582 IConnectableLayer* layer,
Finn Williamsd4fa5452021-03-01 12:31:41 +00003583 const std::vector<unsigned int>& tensorIndexes,
3584 unsigned int startingSlotIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003585{
3586 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003587 ARMNN_ASSERT(layer != nullptr);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003588 if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
telsoa01c577f2c2018-08-31 09:22:23 +01003589 {
3590 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003591 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
3592 " for subgraph:{} operator index:{} {}",
3593 tensorIndexes.size(),
3594 layer->GetNumInputSlots(),
3595 subgraphIndex,
3596 operatorIndex,
3597 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003598 }
3599
Finn Williamsd4fa5452021-03-01 12:31:41 +00003600 for (unsigned int index = 0; index < tensorIndexes.size() ; ++index)
telsoa01c577f2c2018-08-31 09:22:23 +01003601 {
Finn Williamsd4fa5452021-03-01 12:31:41 +00003602 unsigned int tensorIndex = tensorIndexes[index];
3603 armnn::IInputSlot* slot = &(layer->GetInputSlot(startingSlotIndex + index));
telsoa01c577f2c2018-08-31 09:22:23 +01003604 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
3605 }
3606}
3607
Kevin May7d96b162021-02-03 17:38:41 +00003608void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
3609 size_t operatorIndex,
3610 IConnectableLayer* layer,
3611 const std::vector<unsigned int>& tensorIndexes)
telsoa01c577f2c2018-08-31 09:22:23 +01003612{
3613 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003614 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003615 if (tensorIndexes.size() != layer->GetNumOutputSlots())
3616 {
3617 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003618 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
3619 " for subgraph:{} operator index:{} {}",
3620 tensorIndexes.size(),
3621 layer->GetNumOutputSlots(),
3622 subgraphIndex,
3623 operatorIndex,
3624 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003625 }
3626
3627 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
3628 {
3629 unsigned int tensorIndex = tensorIndexes[slotIndex];
3630 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
3631 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3632 }
3633}
3634
Kevin May7d96b162021-02-03 17:38:41 +00003635void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003636{
3637 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3638
3639 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
3640 for (auto const & tensorIdAndPtr : inputs)
3641 {
3642 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3643 IConnectableLayer* layer =
3644 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3645
3646 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
3647 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3648
3649 RegisterOutputSlots(subgraphIndex,
3650 VIRTUAL_OPERATOR_ID,
3651 layer,
3652 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3653 }
3654}
3655
Kevin May7d96b162021-02-03 17:38:41 +00003656void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003657{
3658 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3659
3660 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
3661 for (auto const & tensorIdAndPtr : outputs)
3662 {
3663 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3664 IConnectableLayer* layer =
3665 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3666
3667 RegisterInputSlots(subgraphIndex,
3668 VIRTUAL_OPERATOR_ID,
3669 layer,
3670 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3671 }
3672}
3673
Kevin May7d96b162021-02-03 17:38:41 +00003674void TfLiteParserImpl::SetupConstantLayers(size_t subgraphIndex)
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003675{
3676 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3677
Derek Lambertiff05cc52019-04-26 13:05:17 +01003678 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003679 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
3680 {
3681 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
3682 {
3683 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
3684 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
3685 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003686 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003687 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
Finn Williamsd4fa5452021-03-01 12:31:41 +00003688 auto tensorAndData = CreateConstTensorNonPermuted(tensorPtr, tensorInfo);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003689
James Ward58dec6b2020-09-11 17:32:44 +01003690 std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003691 IConnectableLayer *layer =
Finn Williamsd4fa5452021-03-01 12:31:41 +00003692 m_Network->AddConstantLayer(tensorAndData, layerName.c_str());
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003693
3694 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3695 RegisterOutputSlots(subgraphIndex,
3696 VIRTUAL_OPERATOR_ID,
3697 layer,
3698 { tensorIndex });
3699
3700 }
3701 }
3702 }
3703}
3704
telsoa01c577f2c2018-08-31 09:22:23 +01003705// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
Kevin May7d96b162021-02-03 17:38:41 +00003706TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
telsoa01c577f2c2018-08-31 09:22:23 +01003707{
3708 CHECK_BUFFER(model, bufferIndex);
3709 return model->buffers[bufferIndex].get();
3710}
3711
Matteo Martincigh747ef822018-12-18 09:26:39 +00003712template<typename T>
Kevin May7d96b162021-02-03 17:38:41 +00003713std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
3714TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
3715 TfLiteParserImpl::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00003716 armnn::TensorInfo& tensorInfo,
3717 armnn::Optional<armnn::PermutationVector&> permutationVector)
3718{
3719 auto constData = CreateConstTensorImpl<T>(bufferPtr,
3720 tensorPtr,
3721 tensorInfo,
3722 permutationVector);
Kevin May7d96b162021-02-03 17:38:41 +00003723 TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
Matteo Martincigh747ef822018-12-18 09:26:39 +00003724 return std::make_pair(constData.first, std::move(storage));
3725}
3726
Finn Williamsd4fa5452021-03-01 12:31:41 +00003727bool TfLiteParserImpl::IsConstTensor(TensorRawPtr tensorPtr)
3728{
3729 CHECK_TENSOR_PTR(tensorPtr);
mathad01bf7edb62021-04-20 16:12:45 +01003730 bool isConst = true;
3731
3732 auto buffer = GetBuffer(m_Model, tensorPtr->buffer);
3733 if (buffer->data.size() == 0)
3734 {
3735 isConst = false;
3736 }
3737
3738 return isConst;
Finn Williamsd4fa5452021-03-01 12:31:41 +00003739}
3740
3741
Kevin May7d96b162021-02-03 17:38:41 +00003742std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
Finn Williamsd4fa5452021-03-01 12:31:41 +00003743TfLiteParserImpl::CreateConstTensorPermuted(TensorRawPtr tensorPtr,
3744 armnn::TensorInfo& tensorInfo,
3745 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01003746{
3747 CHECK_TENSOR_PTR(tensorPtr);
3748 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3749 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3750
3751 switch (tensorInfo.GetDataType())
3752 {
3753 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003754 return CreateConstTensorAndStoreData<float>(bufferPtr,
3755 tensorPtr,
3756 tensorInfo,
3757 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00003758 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003759 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
3760 tensorPtr,
3761 tensorInfo,
3762 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00003763 case armnn::DataType::QSymmS8:
3764 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3765 tensorPtr,
3766 tensorInfo,
3767 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00003768 case armnn::DataType::QAsymmS8:
3769 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3770 tensorPtr,
3771 tensorInfo,
3772 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003773 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003774 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
3775 tensorPtr,
3776 tensorInfo,
3777 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003778 default:
3779 {
3780 std::stringstream errString;
3781 errString << "Unexpected datatype when creating const tensor: "
3782 << armnn::GetDataTypeName(tensorInfo.GetDataType())
3783 << " shape:" << tensorInfo.GetShape()
3784 << CHECK_LOCATION().AsString();
3785 throw ParseException(errString.str());
3786 }
3787 }
3788}
3789
Finn Williamsd4fa5452021-03-01 12:31:41 +00003790armnn::ConstTensor TfLiteParserImpl::CreateConstTensorNonPermuted(TensorRawPtr tensorPtr,
3791 armnn::TensorInfo& tensorInfo)
3792{
3793 CHECK_TENSOR_PTR(tensorPtr);
3794 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3795 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3796
3797 return ConstTensor(tensorInfo, bufferPtr->data.data());
3798}
3799
Kevin May7d96b162021-02-03 17:38:41 +00003800BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
3801 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01003802{
3803 CHECK_SUBGRAPH(m_Model, subgraphId);
3804 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3805 for (auto const & input : inputs)
3806 {
3807 if (input.second->name == name)
3808 {
3809 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3810 return std::make_pair(bindingId, ToTensorInfo(input.second));
3811 }
3812 }
3813
3814 std::stringstream bindings;
3815 for (auto const & input : inputs)
3816 {
3817 bindings << "'" << input.second->name << "' ";
3818 }
3819
3820 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003821 fmt::format("No input binding found for subgraph:{} and name:{}. "
3822 "Possible inputs are: [{}] {}",
3823 subgraphId,
3824 name,
3825 bindings.str(),
3826 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003827}
3828
Kevin May7d96b162021-02-03 17:38:41 +00003829BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
3830 const std::string& name) const
telsoa01c577f2c2018-08-31 09:22:23 +01003831{
3832 CHECK_SUBGRAPH(m_Model, subgraphId);
3833 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003834 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01003835 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003836 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01003837 if (output.second->name == name)
3838 {
3839 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003840 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3841 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3842 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01003843 }
3844 }
3845
3846 std::stringstream bindings;
3847 for (auto const & output : outputs)
3848 {
3849 bindings << "'" << output.second->name << "' ";
3850 }
3851
3852 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003853 fmt::format("No output binding found for subgraph:{} and name:{}. "
3854 "Possible outputs are: [{}] {}",
3855 subgraphId,
3856 name,
3857 bindings.str(),
3858 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003859}
3860
// Returns the number of subgraphs contained in the parsed TfLite model.
size_t TfLiteParserImpl::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}
3865
Kevin May7d96b162021-02-03 17:38:41 +00003866std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01003867{
3868 CHECK_SUBGRAPH(m_Model, subgraphId);
3869 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3870 std::vector<std::string> result;
3871 result.reserve(inputs.size());
3872 for (auto const & input : inputs)
3873 {
3874 result.push_back(input.second->name);
3875 }
3876 return result;
3877}
3878
Kevin May7d96b162021-02-03 17:38:41 +00003879std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
telsoa01c577f2c2018-08-31 09:22:23 +01003880{
3881 CHECK_SUBGRAPH(m_Model, subgraphId);
3882 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3883 std::vector<std::string> result;
3884 result.reserve(outputs.size());
3885 for (auto const & output : outputs)
3886 {
3887 result.push_back(output.second->name);
3888 }
3889 return result;
3890}
3891
// Returns the TfLite parser library version string baked in at build time
// (TFLITE_PARSER_VERSION from armnnTfLiteParser/Version.hpp).
const std::string TfLiteParserImpl::GetVersion()
{
    return TFLITE_PARSER_VERSION;
}
3896
// SupportedDataStorage owns the typed heap buffer that backs a ConstTensor
// created by CreateConstTensorAndStoreData. Each constructor takes ownership
// of exactly one typed buffer and explicitly nulls the remaining members.

// Takes ownership of float data (used for Float32 const tensors).
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}

// Takes ownership of uint8_t data (used for QAsymmU8 const tensors).
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}

// Takes ownership of int8_t data (used for QSymmS8/QAsymmS8 const tensors).
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}

// Takes ownership of int32_t data (used for Signed32 const tensors).
TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
3928
3929} // armnnTfLiteParser