blob: 1a1e854395c6bbc1f5307056b41358b14f73cd66 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kellyc5789ca2020-07-06 19:24:15 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Sadik Armagand109a4d2020-07-28 10:42:13 +01008#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +00009#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010010#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000011#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010012#include <armnn/Tensor.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010013#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010014#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000015#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010016#include <armnn/utility/NumericCast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010017
18// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000019#include <armnnUtils/Permute.hpp>
Francis Murtagh532a29d2020-06-29 11:50:01 +010020#include <Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000021
Sadik Armagan479045b2018-10-01 11:51:37 +010022#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010023#include <VerificationHelpers.hpp>
24
25// The generated code based on the Tf Lite schema:
26#include <schema_generated.h>
27
Matteo Martincighe011d202019-11-28 11:35:47 +000028#include <flatbuffers/flexbuffers.h>
29
James Ward58dec6b2020-09-11 17:32:44 +010030#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010031
32#include <fstream>
33#include <algorithm>
34#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010035#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000036#include <sstream>
37
// Throws an armnn::ParseException whose message is `msg` (streamed, so it may
// be any `operator<<` chain) suffixed with ": " and the call-site location.
#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
    { \
        throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
                                     << ": " \
                                     << CHECK_LOCATION().AsString()).str()); \
    }
telsoa01c577f2c2018-08-31 09:22:23 +010044
45using namespace armnn;
46using armnn::CheckLocation;
47namespace armnnTfLiteParser
48{
49namespace
50{
jimfly01c25411c2018-11-14 17:47:22 +000051
telsoa01c577f2c2018-08-31 09:22:23 +010052const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
53
54void CheckSubgraph(const TfLiteParser::ModelPtr & model,
55 size_t subgraphIndex,
56 const CheckLocation & location)
57{
58 if (model.get() == nullptr)
59 {
60 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +010061 fmt::format("{} was called with invalid (null) model. "
62 "Possible reason is that the model is not yet loaded and Unpack(ed). "
63 "subgraph:{} at {}",
64 location.m_Function,
65 subgraphIndex,
66 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +010067 }
68 else if (subgraphIndex >= model->subgraphs.size())
69 {
70 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +010071 fmt::format("{} was called with an invalid subgraph index. "
72 "subgraph:{} at {}",
73 location.m_Function,
74 subgraphIndex,
75 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +010076 }
77}
78
79#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
80 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
81
82void CheckModel(const TfLiteParser::ModelPtr & model,
83 size_t subgraphIndex,
84 size_t operatorIndex,
85 const CheckLocation & location)
86{
87 if (model.get() == nullptr)
88 {
89 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +010090 fmt::format("{} was called with invalid (null) model. "
91 "Possible reason is that the model is not yet loaded and Unpack(ed). "
92 "subgraph:{} operator:{} at {}",
93 location.m_Function,
94 subgraphIndex,
95 operatorIndex,
96 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +010097 }
98 else if (subgraphIndex >= model->subgraphs.size())
99 {
100 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100101 fmt::format("{} was called with an invalid subgraph index. "
102 "subgraph:{} operator:{} at {}",
103 location.m_Function,
104 subgraphIndex,
105 operatorIndex,
106 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100107 }
108 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
109 operatorIndex != VIRTUAL_OPERATOR_ID)
110 {
111 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100112 fmt::format("{} was called with an invalid operator index. "
113 "subgraph:{} operator:{} at {}",
114 location.m_Function,
115 subgraphIndex,
116 operatorIndex,
117 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100118 }
119}
120
121#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
122 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
123
124void CheckTensor(const TfLiteParser::ModelPtr & model,
125 size_t subgraphIndex,
126 size_t tensorIndex,
127 const CheckLocation & location)
128{
129 // not checking model, because I assume CHECK_MODEL already run
130 // and checked that. An assert would do.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100131 ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
telsoa01c577f2c2018-08-31 09:22:23 +0100132
133 // also subgraph index should be checked by CHECK_MODEL so
134 // I only add an assert here
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100135 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
telsoa01c577f2c2018-08-31 09:22:23 +0100136
137 // the tensor index is the only one to check here
138 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
139 {
140 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100141 fmt::format("{} was called with an invalid tensor index. "
142 "subgraph:{} tensor:{} at {}",
143 location.m_Function,
144 subgraphIndex,
145 tensorIndex,
146 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100147 }
148}
149
150#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
151 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
152
153void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
154 const CheckLocation & location)
155{
156 if (rawPtr == nullptr)
157 {
158 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100159 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100160 }
161}
162
163#define CHECK_TENSOR_PTR(TENSOR_PTR) \
164 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
165
166void CheckBuffer(const TfLiteParser::ModelPtr & model,
167 size_t bufferIndex,
168 const CheckLocation & location)
169{
170 if (model.get() == nullptr)
171 {
172 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100173 fmt::format("{} was called with invalid (null) model. "
174 "Possible reason is that the model is not yet loaded and Unpack(ed). "
175 "buffer:{} at {}",
176 location.m_Function,
177 bufferIndex,
178 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100179 }
180 else if (bufferIndex >= model->buffers.size())
181 {
182 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100183 fmt::format("{} was called with an invalid buffer index. "
184 "buffer index:{} at {}",
185 location.m_Function,
186 bufferIndex,
187 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100188 }
189 else if (model->buffers[bufferIndex].get() == nullptr)
190 {
191 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100192 fmt::format("The buffer #{} is null. {}",
193 bufferIndex,
194 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100195 }
196}
197
198#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
199 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
200
201void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
202 const armnn::TensorInfo & tensorInfo,
203 uint32_t bufferId,
204 const CheckLocation & location)
205{
206 if (bufferPtr == nullptr)
207 {
208 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100209 fmt::format("BufferPtr is null for buffer:{}. {}",
210 bufferId,
211 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100212 }
213 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
214 tensorInfo.GetNumBytes() > bufferPtr->data.size())
215 {
216 std::stringstream ss;
217 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
218 << "For tensor: " << tensorInfo.GetShape()
219 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
220 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
221 throw ParseException(ss.str());
222 }
223}
224
225#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
226 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
227
228bool IsActivationSupported(tflite::ActivationFunctionType activationType)
229{
230 switch(activationType)
231 {
232 case tflite::ActivationFunctionType_NONE:
233 case tflite::ActivationFunctionType_RELU:
234 case tflite::ActivationFunctionType_RELU6:
235 case tflite::ActivationFunctionType_TANH:
236 {
237 return true;
238 }
239 default:
240 {
241 return false;
242 }
243 }
244}
245
// Throws a ParseException when OPTION carries a fused activation the parser
// cannot handle (see IsActivationSupported). The message reports both the raw
// enum value and its schema name, plus the offending subgraph/operator.
// Fix: the error message previously read "doesn't suppport" (typo).
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
    do { \
        if (IsActivationSupported(OPTION->fused_activation_function) == false) \
        { \
            throw ParseException( \
                fmt::format("TfLite parser doesn't support fused activation: " \
                            "{}/{} in {} subgraph:{} operator:{} at {}", \
                            OPTION->fused_activation_function, \
                            tflite::EnumNameActivationFunctionType(\
                            OPTION->fused_activation_function), \
                            __func__, \
                            SUBGRAPH_INDEX, \
                            OPERATOR_INDEX, \
                            CHECK_LOCATION().FileLine())); \
        } \
    } while(false)
262
263
264std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
265{
266 std::vector<unsigned int> result;
267 result.reserve(in.size());
268 for (auto & i : in)
269 {
270 result.push_back(CHECKED_NON_NEGATIVE(i));
271 }
272 return result;
273}
274
275void CalcPadding(uint32_t inputSize,
276 uint32_t filterSize,
277 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100278 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100279 uint32_t& paddingFront,
280 uint32_t& paddingBack,
281 tflite::Padding padding)
282{
283 paddingFront = 0;
284 paddingBack = 0;
285 if (padding == tflite::Padding_SAME)
286 {
287 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100288 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
289 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100290 if (temp > inputSize)
291 {
292 paddingFront = (temp - inputSize) / 2;
293 paddingBack = (temp - inputSize) - paddingFront;
294 }
295 }
296}
297
Sadik Armagand109a4d2020-07-28 10:42:13 +0100298armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
299 const std::vector<unsigned int>& shapes,
300 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3},
301 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100302{
303 armnn::DataType type;
304 CHECK_TENSOR_PTR(tensorPtr);
305
306 switch (tensorPtr->type)
307 {
308 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000309 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100310 break;
311 case tflite::TensorType_FLOAT32:
312 type = armnn::DataType::Float32;
313 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000314 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000315 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000316 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000317 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000318 type = armnn::DataType::QAsymmS8;
319 }
320 else
321 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000322 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000323 type = armnn::DataType::QSymmS8;
324 }
Finn Williamsed66d142019-12-06 09:55:55 +0000325 break;
326 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000327 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000328 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100329 case tflite::TensorType_INT32:
330 type = armnn::DataType::Signed32;
331 break;
Inki Daed4619e22020-09-10 15:33:54 +0900332 case tflite::TensorType_INT64:
333 type = armnn::DataType::Signed64;
334 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100335 default:
336 {
337 CheckLocation location = CHECK_LOCATION();
338 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100339 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
340 tensorPtr->type,
341 tflite::EnumNameTensorType(tensorPtr->type),
342 tensorPtr->name,
343 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100344 }
345 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100346 std::vector<unsigned int> safeShape = shapes;
Sadik Armagand109a4d2020-07-28 10:42:13 +0100347 bool isDynamic = false;
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100348 if (safeShape.size() == 0)
349 {
350 safeShape.push_back(1);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100351 if (outputTensor)
352 {
353 isDynamic = true;
354 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100355 }
356
Keith Davisd305e1a2020-01-22 11:57:54 +0000357 float quantizationScale = 0.0f;
358 int32_t quantizationOffset = 0;
359
360 if (tensorPtr->quantization.get())
361 {
362 if (tensorPtr->quantization->scale.size() <= 1)
363 {
364 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
365 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
366
367 if (tensorPtr->quantization->scale.size() == 1)
368 {
369 quantizationScale = tensorPtr->quantization->scale[0];
370 }
371 if (tensorPtr->quantization->zero_point.size() == 1)
372 {
373 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000374 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100375 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000376 }
377
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100378 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100379 safeShape.data());
380 if (isDynamic)
381 {
382 tensorShape = TensorShape(1, false);
383 }
384 armnn::TensorInfo result(tensorShape,
385 type,
386 quantizationScale,
387 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000388 return result;
389 }
390 else
391 {
392 std::vector<float> quantizationScales;
393 std::vector<int32_t> quantizationOffsets;
394
395 // Scale
396 std::copy(tensorPtr->quantization->scale.begin(),
397 tensorPtr->quantization->scale.end(),
398 std::back_inserter(quantizationScales));
399
Keith Davis0c2eeac2020-02-11 16:51:50 +0000400 // QSymmS8 Per-axis
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100401 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100402 safeShape.data());
403 if (isDynamic)
404 {
405 tensorShape = TensorShape(1, false);
406 }
407 armnn::TensorInfo result(tensorShape,
408 type,
409 quantizationScales,
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100410 dimensionMappings[armnn::numeric_cast<unsigned int>(
Sadik Armagand109a4d2020-07-28 10:42:13 +0100411 tensorPtr->quantization->quantized_dimension)]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000412 return result;
413 }
414 }
415 else
416 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100417 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100418 safeShape.data());
419 if (isDynamic)
420 {
421 tensorShape = TensorShape(1, false);
422 }
423 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000424 type,
425 quantizationScale,
426 quantizationOffset);
427 return result;
428 }
telsoa01c577f2c2018-08-31 09:22:23 +0100429}
430
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +0100431armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
Keith Davis0c2eeac2020-02-11 16:51:50 +0000432 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000433{
434 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Keith Davis0c2eeac2020-02-11 16:51:50 +0000435 return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000436}
437
Sadik Armagand109a4d2020-07-28 10:42:13 +0100438armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
439 const bool outputTensor)
440{
441 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
442 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3};
443 return ToTensorInfo(tensorPtr, dimensions, dimensionMappings, outputTensor);
444}
445
telsoa01c577f2c2018-08-31 09:22:23 +0100446template<typename T>
447std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
448CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
449 TfLiteParser::TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000450 armnn::TensorInfo& tensorInfo,
451 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +0100452{
Jan Eilers8eb25602020-03-09 12:13:48 +0000453 IgnoreUnused(tensorPtr);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100454 ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
455 ARMNN_ASSERT_MSG(bufferPtr != nullptr,
James Ward58dec6b2020-09-11 17:32:44 +0100456 fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
telsoa01c577f2c2018-08-31 09:22:23 +0100457
458 std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000459
460 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
461 {
462 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
Matteo Martincighd5b9e642019-01-04 18:01:21 +0000463 armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
464 reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
Matteo Martincigh747ef822018-12-18 09:26:39 +0000465 }
466 else
467 {
468 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
469 }
470
telsoa01c577f2c2018-08-31 09:22:23 +0100471 return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
472}
473
telsoa01c577f2c2018-08-31 09:22:23 +0100474armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
475{
476 // generate the binding id by shifting the tensor id by 8 bit
477 // and add the subgraph id, which allows 256 subgraphs
478 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
479}
480
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000481bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
482{
483 const unsigned int actualSize = actual.GetNumDimensions();
484 if (actualSize != expected.size())
485 {
486 return false;
487 }
488
489 for (unsigned int i = 0u; i < actualSize; i++)
490 {
491 if (expected[i] < 0 ||
492 actual[i] != static_cast<unsigned int>(expected[i]))
493 {
494 return false;
495 }
496 }
497
498 return true;
499}
500
James Conroy05102392020-06-24 15:39:55 +0100501void CheckMatchingQuantization(const TensorInfo& first,
502 const TensorInfo& second,
503 const std::string& descName,
504 std::string const& firstName,
505 std::string const& secondName)
506{
507 if (!first.IsQuantized() ||
508 !second.IsQuantized())
509 {
510 // Not a quantized type, ignore the validation
511 return;
512 }
513
514 DataType firstDataType = first.GetDataType();
515 DataType secondDataType = second.GetDataType();
516
517 if (firstDataType != secondDataType)
518 {
519 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
520 " must be of the same quantized type, " +
521 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
522 secondName + " is " + GetDataTypeName(secondDataType));
523 }
524
525 if (!first.IsTypeSpaceMatch(second))
526 {
527 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
528 " must have the same quantization space, " +
529 firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
530 " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
531 secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
532 " and scale " + std::to_string(second.GetQuantizationScale()));
533 }
534}
535
telsoa01c577f2c2018-08-31 09:22:23 +0100536} // <anonymous>
537
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100538TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
539: m_Options(options)
540, m_Network(nullptr, nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +0100541, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
542{
543 // register supported operators
Sadik Armagan66dedc72019-12-10 16:32:07 +0000544 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000545 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
546 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
547 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
548 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000549 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
Sadik Armagan26868492021-01-22 14:25:31 +0000550 m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParser::ParseDepthToSpace;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000551 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
Finn Williamsed66d142019-12-06 09:55:55 +0000552 m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
Matthew Sloyan7515d072020-12-16 12:50:01 +0000553 m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParser::ParseElu;
Derek Lambertif0176992020-04-28 13:37:49 +0100554 m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParser::ParseExp;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000555 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
Sadik Armagan26868492021-01-22 14:25:31 +0000556 m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParser::ParseGather;
Jan Eilers2f746b32020-07-28 14:00:06 +0100557 m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParser::ParseHardSwish;
Sadik Armagan12239e72020-05-27 11:06:17 +0100558 m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParser::ParseLeakyRelu;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000559 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
560 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
561 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
562 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000563 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000564 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000565 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
Darshan Patel83fcf982020-05-26 22:22:42 +0530566 m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParser::ParseNeg;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000567 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
568 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
569 m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000570 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
571 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
572 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
573 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
574 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000575 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000576 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
577 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
Sadik Armagan66dedc72019-12-10 16:32:07 +0000578 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
Derek Lambertif0176992020-04-28 13:37:49 +0100579 m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParser::ParseSplitV;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000580 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
581 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
582 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
Sadik Armagana3b31f02019-12-05 09:08:53 +0000583 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
584 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
585 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
586 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
Darshan Patel42b3d7d2020-05-25 22:30:07 +0530587 m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParser::ParseDiv;
Inki Daed4619e22020-09-10 15:33:54 +0900588 m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParser::ParseArgMax;
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100589 // register supported custom operators
590 m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
telsoa01c577f2c2018-08-31 09:22:23 +0100591}
592
593void TfLiteParser::ResetParser()
594{
595 m_Network = armnn::INetworkPtr(nullptr, nullptr);
596 m_Model = nullptr;
597 m_SubgraphConnections.clear();
598}
599
600INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
601{
602 ResetParser();
603 m_Model = LoadModelFromFile(graphFile);
604 return CreateNetworkFromModel();
605}
606
607INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
608{
609 ResetParser();
610 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
611 return CreateNetworkFromModel();
612}
613
614INetworkPtr TfLiteParser::CreateNetworkFromModel()
615{
Sadik Armagand109a4d2020-07-28 10:42:13 +0100616
617 using NetworkOptions = std::vector<BackendOptions>;
618 NetworkOptions networkOptions = {};
619 if (m_Options && m_Options.value().m_InferAndValidate)
620 {
621 BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
622 {
623 { "InferAndValidate", true }
624 });
625
626 networkOptions.push_back(shapeInferenceMethodOption);
627 }
628
629 m_Network = INetwork::Create(networkOptions);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100630 ARMNN_ASSERT(m_Model.get() != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +0100631
telsoa01c577f2c2018-08-31 09:22:23 +0100632 if (m_Model->subgraphs.size() != 1)
633 {
634 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100635 fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
636 m_Model->subgraphs.size(),
637 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100638 }
639
640 size_t subgraphIndex = 0;
Colm Donelan6350d272020-06-09 16:56:25 +0100641 size_t operatorIndex = 0;
642 try
telsoa01c577f2c2018-08-31 09:22:23 +0100643 {
Colm Donelan6350d272020-06-09 16:56:25 +0100644 for (SubgraphPtr const& subgraph : m_Model->subgraphs)
telsoa01c577f2c2018-08-31 09:22:23 +0100645 {
Colm Donelan6350d272020-06-09 16:56:25 +0100646 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
647 for (OperatorPtr const& op : subgraph->operators)
telsoa01c577f2c2018-08-31 09:22:23 +0100648 {
Colm Donelan6350d272020-06-09 16:56:25 +0100649 auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];
telsoa01c577f2c2018-08-31 09:22:23 +0100650 auto builtinCode = opCodePtr->builtin_code;
651
652 if (builtinCode > tflite::BuiltinOperator_MAX)
653 {
James Ward58dec6b2020-09-11 17:32:44 +0100654 throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
655 "subgraph:{} operator idx:{}. {}",
656 builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
657 operatorIndex, CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100658 }
659
660 // lookup and call the parser function
Colm Donelan6350d272020-06-09 16:56:25 +0100661 auto& parserFunction = m_ParserFunctions[builtinCode];
telsoa01c577f2c2018-08-31 09:22:23 +0100662 (this->*parserFunction)(subgraphIndex, operatorIndex);
Colm Donelan6350d272020-06-09 16:56:25 +0100663 ++operatorIndex;
telsoa01c577f2c2018-08-31 09:22:23 +0100664 }
telsoa01c577f2c2018-08-31 09:22:23 +0100665
Colm Donelan6350d272020-06-09 16:56:25 +0100666 SetupInputLayers(subgraphIndex);
667 SetupOutputLayers(subgraphIndex);
668 SetupConstantLayers(subgraphIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100669
Colm Donelan6350d272020-06-09 16:56:25 +0100670 ++subgraphIndex;
671 operatorIndex = 0;
telsoa01c577f2c2018-08-31 09:22:23 +0100672 }
telsoa01c577f2c2018-08-31 09:22:23 +0100673 }
Colm Donelan6350d272020-06-09 16:56:25 +0100674 catch (const ParseException& e)
telsoa01c577f2c2018-08-31 09:22:23 +0100675 {
Colm Donelan6350d272020-06-09 16:56:25 +0100676 std::stringstream errorString;
677 errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
678 << subgraphIndex << " error: " << e.what();
679 ARMNN_LOG(error) << errorString.str();
680 std::stringstream errors;
681 errors << errorString.str() << "\n";
telsoa01c577f2c2018-08-31 09:22:23 +0100682 throw ParseException(errors.str());
683 }
684
685 // establish the connections from the layer outputs to the inputs of the subsequent layers
Colm Donelan6350d272020-06-09 16:56:25 +0100686 for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
telsoa01c577f2c2018-08-31 09:22:23 +0100687 {
688 for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
689 {
690 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
691 {
692 for (size_t inputSlotIdx = 0;
693 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
694 ++inputSlotIdx)
695 {
696 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
697 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
698 }
699 }
700 }
701 }
702
703 return std::move(m_Network);
704}
705
706void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
707 size_t tensorIndex,
708 armnn::IOutputSlot* slot)
709{
710 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100711 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
712 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100713
714 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
715
716 // assuming there is only one producer for that tensor
717 if (tensorSlots.outputSlot != nullptr)
718 {
James Ward58dec6b2020-09-11 17:32:44 +0100719 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
720 "subgraph:{} tensor:{} {}",
721 subgraphIndex,
722 tensorIndex,
723 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100724 }
725
726 tensorSlots.outputSlot = slot;
727}
728
729void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
730 size_t tensorIndex,
731 armnn::IInputSlot* slot)
732{
733 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100734 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
735 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100736
737 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
738 tensorSlots.inputSlots.push_back(slot);
739}
740
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100741void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
742{
743 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
744
745 // NOTE: By default we presume the custom operator is not supported
746 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
747
748 // Identify custom code defined for custom operator
749 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
750 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
751
752 // Find parser function that correspondes to custom code (if any)
753 auto iterator = m_CustomParserFunctions.find(customCode);
754 if (iterator != m_CustomParserFunctions.end())
755 {
756 customParserFunction = iterator->second;
757 }
758
759 // Run parser function
760 (this->*customParserFunction)(subgraphIndex, operatorIndex);
761}
762
telsoa01c577f2c2018-08-31 09:22:23 +0100763void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
764{
765 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100766
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100767 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
768
769 auto opcodeIndex = operatorPtr->opcode_index;
770 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
771
772 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
773 {
774 // Do not add StandInLayer, throw ParseException instead
775 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100776 fmt::format("Operator not supported. "
777 "subgraph:{} operator:{} "
778 "opcode_index:{} opcode:{} / {} {}",
779 subgraphIndex,
780 operatorIndex,
781 opcodeIndex,
782 opcode,
783 tflite::EnumNameBuiltinOperator(opcode),
784 CHECK_LOCATION().AsString()));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100785 }
786
787 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
788 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
789
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100790 const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
791 const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100792
793 StandInDescriptor descriptor(numInputs, numOutputs);
James Ward58dec6b2020-09-11 17:32:44 +0100794 auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100795
796 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
797 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +0100798 ARMNN_ASSERT(layer != nullptr);
799
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100800 for (unsigned int i = 0u; i < numOutputs; ++i)
801 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100802 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100803 }
804
805 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
806 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
807
808 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
809 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +0100810}
811
/// Parses a TfLite CONV_2D operator into an ArmNN Convolution2dLayer,
/// optionally followed by a fused activation layer.
/// Inputs: [0] activation (NHWC), [1] constant weights, optional [2] constant bias.
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    // Translate the TfLite options into an ArmNN descriptor.
    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // Two inputs (activation + weights) or three (with bias); exactly one output.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // assuming the filter is OHWI : Output, H, W, Input
    // which is essentially the same as NHWC
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Derive explicit padding from the TfLite SAME/VALID padding scheme.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    // Weights are a constant tensor; no permutation is applied here.
    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* layer = nullptr;

    auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 Optional<ConstTensor>(biasTensorAndData.first),
                                                 layerName.c_str());
    }
    else
    {
        layer = m_Network->AddConvolution2dLayer(desc,
                                                 filterTensorAndData.first,
                                                 EmptyOptional(),
                                                 layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Insert any fused activation (e.g. RELU) and register outputs against it,
    // so downstream layers connect to the activation's output.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
894
/// Parses a TfLite DEPTHWISE_CONV_2D operator into an ArmNN
/// DepthwiseConvolution2dLayer, optionally followed by a fused activation.
/// The TfLite [1, H, W, I*M] weights are permuted/reshaped into ArmNN's
/// expected layout before being wrapped as a constant tensor.
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // The multiplier itself is only validated here; it is derived again below
    // from the filter and input channel counts when reshaping the weights.
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1], permutationVector);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Derive explicit padding from the TfLite SAME/VALID padding scheme.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // Outputs are registered against the (possibly inserted) fused activation layer.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
981
Finn Williamsed66d142019-12-06 09:55:55 +0000982void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
983{
984 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
985
986 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
987 CHECK_VALID_SIZE(inputs.size(), 1);
988
989 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
990 CHECK_VALID_SIZE(outputs.size(), 1);
991
James Ward58dec6b2020-09-11 17:32:44 +0100992 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +0000993
994 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100995 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +0000996
Sadik Armagand109a4d2020-07-28 10:42:13 +0100997 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Finn Williamsed66d142019-12-06 09:55:55 +0000998 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
999
1000 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1001 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1002
1003 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1004 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1005}
1006
Derek Lambertif0176992020-04-28 13:37:49 +01001007void TfLiteParser::ParseExp(size_t subgraphIndex, size_t operatorIndex)
1008{
1009 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1010
1011 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1012 CHECK_VALID_SIZE(inputs.size(), 1);
1013
1014 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1015 CHECK_VALID_SIZE(outputs.size(), 1);
1016
James Ward58dec6b2020-09-11 17:32:44 +01001017 auto layerName = fmt::format("Exp:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01001018
1019 ElementwiseUnaryDescriptor desc;
1020 desc.m_Operation = UnaryOperation::Exp;
1021 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerName.c_str());
1022 ARMNN_ASSERT(layer != nullptr);
1023
Sadik Armagand109a4d2020-07-28 10:42:13 +01001024 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Derek Lambertif0176992020-04-28 13:37:49 +01001025 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1026
1027 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1028 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1029
1030 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1031 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1032}
1033
Keith Davis4cd29a02019-09-09 14:49:20 +01001034void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
1035{
1036 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1037
1038 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001039 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001040
1041 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1042 CHECK_VALID_SIZE(outputs.size(), 1);
1043
James Ward58dec6b2020-09-11 17:32:44 +01001044 auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly08759e22020-03-02 11:41:31 +00001045 TransposeDescriptor desc;
Keith Davis4cd29a02019-09-09 14:49:20 +01001046
josh minorba424d22019-11-13 10:55:17 -06001047 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001048 {
1049 armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
1050 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001051 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1052 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001053 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
Mike Kelly08759e22020-03-02 11:41:31 +00001054 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
Kevin May85d92602019-09-27 17:21:06 +01001055
Mike Kelly08759e22020-03-02 11:41:31 +00001056 desc = TransposeDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001057 }
1058
James Conroy05102392020-06-24 15:39:55 +01001059 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001060 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001061 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Keith Davis4cd29a02019-09-09 14:49:20 +01001062
James Conroy05102392020-06-24 15:39:55 +01001063 IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001064 ARMNN_ASSERT(layer != nullptr);
Keith Davis4cd29a02019-09-09 14:49:20 +01001065 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1066
1067 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1068 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1069
1070 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1071 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1072}
1073
/// Parses a TfLite TRANSPOSE_CONV operator into an ArmNN
/// TransposeConvolution2dLayer.
/// Inputs: [0] constant output-shape tensor, [1] weights, [2] activation,
/// optional [3] bias (4 inputs enables bias).
void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();

    TransposeConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // A fourth input means a bias tensor is present.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    if (inputs.size() == 4)
    {
        desc.m_BiasEnabled = true;
    }
    else
    {
        CHECK_VALID_SIZE(inputs.size(), 3);
    }

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    // Read the requested output shape from input[0]'s constant buffer, when present.
    if (inputs[0])
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
        std::vector<int> output_shape(tensorInfo.GetNumElements());
        if (tensorInfo.GetDataType() == DataType::Signed32)
        {
            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
        }
        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
        {
            // NOTE(review): each dimension is read as a single byte here, which
            // presumably relies on quantized shape values fitting in uint8 —
            // confirm against the models that produce QAsymmU8 shape tensors.
            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
            {
                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
            }
        }
        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : output_shape)
        {
            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        desc.m_OutputShapeEnabled = true;
    }
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Transpose convolution does not support dilation, hence the fixed factor of 1.
    CalcPadding(inputHeight,
                filterHeight,
                desc.m_StrideY,
                1, // DilationY
                desc.m_PadTop,
                desc.m_PadBottom,
                options->padding);

    CalcPadding(inputWidth,
                filterWidth,
                desc.m_StrideX,
                1, // DilationX
                desc.m_PadLeft,
                desc.m_PadRight,
                options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1],
                                                 filterTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);

    if (desc.m_BiasEnabled)
    {
        auto biasTensorInfo = ToTensorInfo(inputs[3]);
        auto biasConstTensor = CreateConstTensor(inputs[3],
                                                 biasTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          biasConstTensor.first,
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // only the tensors for the inputs are relevant, exclude the const (filter) tensor
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1186
/// Parses AVERAGE_POOL_2D by delegating to the shared pooling parser with
/// the Average algorithm selected.
void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
}
1191
/// Parses a TfLite BATCH_TO_SPACE_ND operator into an ArmNN
/// BatchToSpaceNdLayer.
/// Inputs: [0] activation, [1] constant block-shape tensor,
/// [2] constant crops tensor (pairs of begin/end crop per spatial dim).
void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    // Copy the raw constant buffers into host vectors.
    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());

    // Crops are stored flat as [begin0, end0, begin1, end1, ...]; regroup into pairs.
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }

    armnn::BatchToSpaceNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_Crops = crops;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);

    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register slots now; the actual connections are made once all layers exist.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1242
Matthew Jackson28c94572019-07-18 10:47:03 +01001243void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1244{
1245 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1246
1247 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1248 CHECK_VALID_SIZE(inputs.size(), 1);
1249
1250 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1251 CHECK_VALID_SIZE(outputs.size(), 1);
1252
1253 L2NormalizationDescriptor desc;
1254 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001255 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001256 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1257
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001258 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001259
Sadik Armagand109a4d2020-07-28 10:42:13 +01001260 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson28c94572019-07-18 10:47:03 +01001261 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1262
1263 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1264 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1265
1266 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1267 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1268}
1269
/// Parses MAX_POOL_2D by delegating to the shared pooling parser with the
/// Max algorithm selected.
void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
{
    ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
}
1274
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001275void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1276{
1277 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1278
1279 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1280 CHECK_VALID_SIZE(inputs.size(), 2);
1281
1282 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1283 CHECK_VALID_SIZE(outputs.size(), 1);
1284
James Ward58dec6b2020-09-11 17:32:44 +01001285 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001286
1287 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1288 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1289 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001290
Sadik Armagand109a4d2020-07-28 10:42:13 +01001291 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001292 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1293
1294 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1295 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001296 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1297
1298 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001299 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001300
1301 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1302 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1303}
1304
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001305void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1306{
1307 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1308
1309 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1310 CHECK_VALID_SIZE(inputs.size(), 2);
1311
1312 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1313 CHECK_VALID_SIZE(outputs.size(), 1);
1314
James Ward58dec6b2020-09-11 17:32:44 +01001315 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001316
1317 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1318 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1319 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001320
Sadik Armagand109a4d2020-07-28 10:42:13 +01001321 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001322 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1323
1324 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1325 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001326 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1327
1328 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001329 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001330
1331 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1332 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1333}
1334
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001335void TfLiteParser::ParsePool(size_t subgraphIndex,
1336 size_t operatorIndex,
1337 PoolingAlgorithm algorithm)
1338{
1339 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1340
1341 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1342 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1343
1344 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1345
1346 std::string layerName;
1347
1348 switch (algorithm)
1349 {
1350 case PoolingAlgorithm::Average:
1351 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001352 fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001353 break;
1354 case PoolingAlgorithm::Max:
1355 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001356 fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001357 break;
1358 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001359 ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001360 }
1361
1362 Pooling2dDescriptor desc;
1363
1364 desc.m_PoolType = algorithm;
1365 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1366 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1367 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1368 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1369 desc.m_PaddingMethod = PaddingMethod::Exclude;
1370 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001371 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001372
1373 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1374 CHECK_VALID_SIZE(inputs.size(), 1);
1375 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1376
1377 // assuming input is NHWC
1378 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1379 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1380
Pablo Tellof0bd6832019-04-26 17:58:13 +01001381 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1382 desc.m_PadTop, desc.m_PadBottom, options->padding);
1383 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1384 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001385
1386 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1387 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001388
Sadik Armagand109a4d2020-07-28 10:42:13 +01001389 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001390 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1391
1392 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1393 ARMNN_ASSERT(layer != nullptr);
jimfly01c25411c2018-11-14 17:47:22 +00001394 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001395
1396 // register the input connection slots for the layer, connections are made after all layers have been created
1397 // only the tensors for the inputs are relevant, exclude the const tensors
1398 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001399 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001400
jimfly01c25411c2018-11-14 17:47:22 +00001401 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001402 // register the output connection slots for the layer, connections are made after all layers have been created
1403 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1404 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1405}
1406
/// Converts a TfLite SLICE operator into an armnn Slice layer.
/// Inputs: [0] data tensor, [1] constant 'begin' tensor, [2] constant 'size' tensor.
void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    SliceDescriptor desc;

    // set begin tensor info for slice descriptor
    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    // Raw copy of the constant buffer into the begin vector.
    // NOTE(review): assumes the buffer's element width matches unsigned int
    // (GetNumBytes must equal elements * sizeof(unsigned int)) — confirm.
    std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    // set size tensor info for slice descriptor
    armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
    ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
    desc = SliceDescriptor(begin, size);

    auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);

    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    // Second argument 'true' — presumably allows the output shape to be
    // inferred later; matches the other Parse* functions in this file.
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1451
telsoa01c577f2c2018-08-31 09:22:23 +01001452void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1453{
1454 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1455 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1456 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1457
1458 SoftmaxDescriptor desc;
1459 desc.m_Beta = options->beta;
1460
1461 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1462 CHECK_VALID_SIZE(inputs.size(), 1);
1463 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1464 CHECK_VALID_SIZE(outputs.size(), 1);
1465
James Ward58dec6b2020-09-11 17:32:44 +01001466 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001467 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1468
Sadik Armagand109a4d2020-07-28 10:42:13 +01001469 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
telsoa01c577f2c2018-08-31 09:22:23 +01001470 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1471
1472 // register the input connection slots for the layer, connections are made after all layers have been created
1473 // only the tensors for the inputs are relevant, exclude the const tensors
1474 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1475 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1476
1477 // register the output connection slots for the layer, connections are made after all layers have been created
1478 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1479 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1480}
1481
/// Converts a TfLite SPACE_TO_BATCH_ND operator into an armnn SpaceToBatchNd layer.
/// Inputs: [0] data tensor, [1] constant block-shape tensor, [2] constant pad-list tensor.
void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    // Raw copies of the constant buffers.
    // NOTE(review): assumes the buffers' element width matches unsigned int — confirm.
    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());

    // The flat pad buffer holds (before, after) pairs; fold it into a pair list.
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = blockShape;
    desc.m_PadList = padList;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);

    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Connections are made after all layers have been created.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1532
telsoa01c577f2c2018-08-31 09:22:23 +01001533armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1534 const armnn::TensorInfo & inputTensorInfo)
1535{
1536 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1537 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1538 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1539
1540 if (inputTensorInfo.GetNumDimensions() > 4)
1541 {
1542 std::stringstream ss;
1543 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1544 << " shape:" << inputTensorInfo.GetShape() << " "
1545 << CHECK_LOCATION().AsString();
1546 throw ParseException(ss.str());
1547 }
1548
1549 if (squeezeDims.empty())
1550 {
1551 squeezeDims.assign(dimensionSequence,
1552 dimensionSequence+inputTensorInfo.GetNumDimensions());
1553 }
1554
1555 std::vector<uint32_t> outputDims;
1556 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1557 {
1558 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1559 auto currentDimension = inputTensorInfo.GetShape()[i];
1560 if (skipSqueeze || currentDimension != 1)
1561 {
1562 outputDims.push_back(currentDimension);
1563 }
1564 }
1565
1566 if (outputDims.size() > 4)
1567 {
1568 std::stringstream ss;
1569 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1570 << " shape:" << inputTensorInfo.GetShape() << " "
1571 << CHECK_LOCATION().AsString();
1572 throw ParseException(ss.str());
1573 }
1574
1575 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1576 outputDims.data());
1577
1578 // we need to preserve the tensor type and the quantization data as well
1579 TensorInfo outTensorInfo = inputTensorInfo;
1580 outTensorInfo.SetShape(outShape);
1581
1582 return outTensorInfo;
1583}
1584
1585void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1586{
1587 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1588
1589 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1590 CHECK_VALID_SIZE(inputs.size(), 1);
1591
1592 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1593 CHECK_VALID_SIZE(outputs.size(), 1);
1594
1595 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1596 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01001597 auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001598
1599 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1600 armnn::TensorInfo outputTensorInfo =
1601 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1602 inputTensorInfo);
James Conroy05102392020-06-24 15:39:55 +01001603 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
telsoa01c577f2c2018-08-31 09:22:23 +01001604
1605 ReshapeDescriptor reshapeDesc;
1606 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1607
telsoa01c577f2c2018-08-31 09:22:23 +01001608 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001609 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001610 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1611
1612 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1613 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1614
1615 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1616 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1617}
1618
/// Converts a TfLite STRIDED_SLICE operator into an armnn StridedSlice layer.
/// Inputs: [0] data, [1] constant 'begin', [2] constant 'end', [3] constant 'strides'.
void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();

    // The mask fields are copied straight from the flatbuffer options.
    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Raw copies of the three constant index buffers.
    // NOTE(review): assumes the buffers' element width matches int — confirm.
    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Connections are made after all layers have been created.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1675
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001676void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1677{
1678 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1679
1680 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1681 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1682
1683 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1684 CHECK_VALID_SIZE(inputs.size(), 2);
1685
1686 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1687 CHECK_VALID_SIZE(outputs.size(), 1);
1688
1689 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1690 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1691
James Ward58dec6b2020-09-11 17:32:44 +01001692 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001693 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001694 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001695
Sadik Armagand109a4d2020-07-28 10:42:13 +01001696 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001697 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1698
1699 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001700 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001701
1702 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1703
1704 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1705 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1706}
1707
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301708void TfLiteParser::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
1709{
1710 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1711
1712 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1713 const auto * options = operatorPtr->builtin_options.AsDivOptions();
1714
1715 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1716 CHECK_VALID_SIZE(inputs.size(), 2);
1717
1718 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1719 CHECK_VALID_SIZE(outputs.size(), 1);
1720
1721 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1722 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1723
James Ward58dec6b2020-09-11 17:32:44 +01001724 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301725 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001726 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301727
Sadik Armagand109a4d2020-07-28 10:42:13 +01001728 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301729 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1730
1731 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001732 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301733 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1734
1735 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1736 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1737}
1738
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001739void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1740{
1741 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1742
1743 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1744 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1745
1746 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1747 CHECK_VALID_SIZE(inputs.size(), 2);
1748
1749 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1750 CHECK_VALID_SIZE(outputs.size(), 1);
1751
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001752 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1753 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1754
James Ward58dec6b2020-09-11 17:32:44 +01001755 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001756 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001757 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001758
Sadik Armagand109a4d2020-07-28 10:42:13 +01001759 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001760 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1761
1762 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001763 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001764 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1765
1766 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1767 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1768}
1769
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001770void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1771{
1772 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1773
1774 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1775 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1776
1777 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1778 CHECK_VALID_SIZE(inputs.size(), 2);
1779
1780 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1781 CHECK_VALID_SIZE(outputs.size(), 1);
1782
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001783 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1784 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1785
James Ward58dec6b2020-09-11 17:32:44 +01001786 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001787 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001788 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001789
Sadik Armagand109a4d2020-07-28 10:42:13 +01001790 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001791 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1792
1793 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001794 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001795 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1796
1797 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1798 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1799}
1800
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001801void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1802{
1803 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1804
1805 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1806
1807 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1808 CHECK_VALID_SIZE(outputs.size(), 1);
1809
1810 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1811 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1812
1813 armnn::MeanDescriptor desc;
1814 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1815 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1816 desc.m_Axis = axis;
1817
1818 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001819 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001820
1821 desc.m_KeepDims =
1822 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1823 true : false;
1824
James Ward58dec6b2020-09-11 17:32:44 +01001825 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001826 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001827 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001828
1829 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1830
1831 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1832 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1833
1834 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1835 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1836}
1837
Darshan Patel83fcf982020-05-26 22:22:42 +05301838void TfLiteParser::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
1839{
1840 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1841
1842 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1843 CHECK_VALID_SIZE(inputs.size(), 1);
1844
1845 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1846 CHECK_VALID_SIZE(outputs.size(), 1);
1847
James Ward58dec6b2020-09-11 17:32:44 +01001848 auto layerName = fmt::format("Neg:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel83fcf982020-05-26 22:22:42 +05301849 armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Neg);
1850 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
1851 ARMNN_ASSERT(layer != nullptr);
1852
Sadik Armagand109a4d2020-07-28 10:42:13 +01001853 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel83fcf982020-05-26 22:22:42 +05301854 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1855
1856 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1857 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1858
1859 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1860 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1861}
1862
/// Converts a TfLite PAD operator into an ArmNN Pad layer.
/// Input 0 is the tensor to pad; input 1 is a constant padding tensor whose
/// elements come in (before, after) pairs, one pair per input dimension.
void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);

    // The padding amounts live in a constant tensor (input 1); copy its raw
    // buffer into a local vector before reading it.
    armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
    ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());

    // Elements are consumed two at a time: (pad-before, pad-after) per dimension.
    size_t step = 2;
    armnn::PadDescriptor desc;
    if (inputTensorInfo.IsQuantized())
    {
        // For quantized tensors pad with the quantization offset, presumably so
        // the padded value corresponds to real 0 - TODO confirm against the
        // Pad layer's descriptor semantics.
        desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
    }
    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
    {
        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
    }

    auto layerName = fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);

    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the data tensor (input 0) is registered; the padding tensor is const.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1904
Sadik Armagan66dedc72019-12-10 16:32:07 +00001905void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
1906{
1907 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1908
1909 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1910 CHECK_VALID_SIZE(inputs.size(), 1);
1911
1912 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1913 CHECK_VALID_SIZE(outputs.size(), 1);
1914
James Ward58dec6b2020-09-11 17:32:44 +01001915 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001916
1917 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001918 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001919
Sadik Armagand109a4d2020-07-28 10:42:13 +01001920 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001921 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1922
1923 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1924 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1925
1926 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1927 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1928}
Finn Williamsc42c3842019-01-22 14:18:11 +00001929
// Parses a TfLite RELU operator by delegating to the shared activation handler.
void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
}
1934
// Parses a TfLite RELU6 operator; RELU6 maps to ArmNN's BoundedReLu
// (the shared handler sets the bounds).
void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
}
Sadik Armagan58f39192018-09-17 14:14:39 +01001939
// Parses a TfLite LEAKY_RELU operator; the alpha coefficient is read from the
// operator's builtin options inside the shared activation handler.
void TfLiteParser::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
}
1944
// Parses a TfLite LOGISTIC operator, which maps to the Sigmoid activation.
void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
}
1949
// Parses a TfLite TANH operator by delegating to the shared activation handler.
void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
}
1954
// Parses a TfLite ELU operator by delegating to the shared activation handler.
void TfLiteParser::ParseElu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
}
1959
// Parses a TfLite HARD_SWISH operator by delegating to the shared activation handler.
void TfLiteParser::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
}
Finn Williamsc42c3842019-01-22 14:18:11 +00001964
1965void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1966{
1967 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001968 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Jan Eilers8eb25602020-03-09 12:13:48 +00001969 IgnoreUnused(operatorPtr);
Sadik Armagan58f39192018-09-17 14:14:39 +01001970
1971 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1972 CHECK_VALID_SIZE(inputs.size(), 1);
1973
1974 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1975 CHECK_VALID_SIZE(outputs.size(), 1);
1976
James Ward58dec6b2020-09-11 17:32:44 +01001977 auto layerName = fmt::format("Activation:");
Sadik Armagan58f39192018-09-17 14:14:39 +01001978 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001979 activationDesc.m_Function = activationType;
1980
1981 switch (activationType)
1982 {
1983 case ActivationFunction::ReLu:
1984 {
James Ward58dec6b2020-09-11 17:32:44 +01001985 layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00001986 break;
1987 }
1988 case ActivationFunction::BoundedReLu:
1989 {
James Ward58dec6b2020-09-11 17:32:44 +01001990 layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00001991 activationDesc.m_A = 6.0f;
1992 activationDesc.m_B = 0.0f;
1993 break;
1994 }
1995 case ActivationFunction::Sigmoid:
1996 {
James Ward58dec6b2020-09-11 17:32:44 +01001997 layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00001998 break;
1999 }
Nina Drozd99851762019-04-09 09:37:38 +01002000 case ActivationFunction::TanH:
2001 {
James Ward58dec6b2020-09-11 17:32:44 +01002002 layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd99851762019-04-09 09:37:38 +01002003 activationDesc.m_A = 1.0f;
2004 activationDesc.m_B = 1.0f;
2005 break;
2006 }
Sadik Armagan12239e72020-05-27 11:06:17 +01002007 case ActivationFunction::LeakyReLu:
2008 {
James Ward58dec6b2020-09-11 17:32:44 +01002009 layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan12239e72020-05-27 11:06:17 +01002010 const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
2011 activationDesc.m_A = options->alpha;
2012 break;
2013 }
Matthew Sloyan7515d072020-12-16 12:50:01 +00002014 case ActivationFunction::Elu:
2015 {
2016 layerName += fmt::format("ELU:{}:{}", subgraphIndex, operatorIndex);
2017 activationDesc.m_A = 1.0f;
2018 break;
2019 }
Jan Eilers2f746b32020-07-28 14:00:06 +01002020 case ActivationFunction::HardSwish:
Matthew Sloyan7515d072020-12-16 12:50:01 +00002021 {
James Ward58dec6b2020-09-11 17:32:44 +01002022 layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
Jan Eilers2f746b32020-07-28 14:00:06 +01002023 break;
Matthew Sloyan7515d072020-12-16 12:50:01 +00002024 }
Finn Williamsc42c3842019-01-22 14:18:11 +00002025 default:
2026 {
2027 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002028 fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
2029 static_cast<int>(activationType), CHECK_LOCATION().AsString()));
Finn Williamsc42c3842019-01-22 14:18:11 +00002030 }
2031 }
2032
2033 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01002034
Sadik Armagand109a4d2020-07-28 10:42:13 +01002035 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan58f39192018-09-17 14:14:39 +01002036 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2037
2038 // register the input connection slots for the layer, connections are made after all layers have been created
2039 // only the tensors for the inputs are relevant, exclude the const tensors
2040 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2041 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2042
2043 // register the output connection slots for the layer, connections are made after all layers have been created
2044 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2045 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2046}
Sadikb94967b2018-09-19 15:30:00 +01002047armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
2048 const std::vector<int32_t> & targetDimsIn)
2049{
2050 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2051 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2052
2053 if (stretchDim != targetDimsIn.end())
2054 {
2055 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2056 {
2057 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002058 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01002059 }
2060
2061 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002062 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01002063 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2064
2065 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2066 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2067 }
2068
2069 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2070
2071 TensorInfo reshapeInfo = inputTensorInfo;
2072 reshapeInfo.SetShape(outputShape);
2073
2074 return reshapeInfo;
2075}
2076
/// Converts a TfLite RESHAPE operator into an ArmNN Reshape layer.
/// The target shape can be supplied either through the operator's builtin
/// options (new_shape) or as a constant second input tensor; the builtin
/// options take precedence when non-empty.
void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
    auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    // Reshape must not change quantization parameters between input and output.
    CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");

    // Extracting new shape for the output
    // There are two ways it can be passed
    //  * First is to define the target shape in the operator built-in options
    //  * Second is to pass it as a second input tensor
    std::vector<int32_t> targetShape;
    bool targetShapeFound = false;
    // Check if built-in options were given
    if (options != nullptr)
    {
        // make sure the parameter is given
        if (options->new_shape.empty() == false)
        {
            targetShape = options->new_shape;
            targetShapeFound = true;
        }
    }

    // If there is no built-in option given or if the built-in new_shape parameter was empty
    if (!targetShapeFound)
    {
        // Check for a second input tensor
        if (inputs.size() > 1 && inputs[1] != nullptr)
        {
            // The shape input must be a constant 1-D int32 tensor.
            if (inputs[1]->is_variable)
            {
                ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
            }

            if (inputs[1]->shape.size() != 1)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
            }

            if (inputs[1]->type != tflite::TensorType_INT32)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
            }

            // Extract target shape from input
            auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
            auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
            if (!values)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Reshape operator target shape input buffer data is null");
            }
            for (int i=0; i < inputs[1]->shape[0]; ++i)
            {
                targetShape.push_back(values[i]);
            }
        }
        else
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }
    }

    // Resolve any -1 ("stretch") component against the input's element count.
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    // (only checked when the shape came from a second input tensor).
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    // Register slots; connections are made after all layers have been created.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2181
// Parses a TfLite RESIZE_BILINEAR operator via the shared resize handler.
void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
}
2186
// Parses a TfLite RESIZE_NEAREST_NEIGHBOR operator via the shared resize handler.
void TfLiteParser::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
}
2191
/// Shared implementation for RESIZE_BILINEAR and RESIZE_NEAREST_NEIGHBOR.
/// Input 0 is the image tensor; input 1 is a constant tensor holding the
/// target (height, width).
void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);

    // Data for the parsed tensor args (size) must be stored locally.
    std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());

    BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
    ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());

    // The size tensor is laid out as [height, width]; TfLite tensors are NHWC.
    ResizeDescriptor desc;
    desc.m_Method       = resizeMethod;
    desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
    desc.m_TargetWidth  = static_cast<uint32_t> (sizeTensorData[1]);
    desc.m_DataLayout   = armnn::DataLayout::NHWC;

    auto layerName = fmt::format("Resize:");

    switch (resizeMethod)
    {
        case ResizeMethod::Bilinear:
        {
            layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);

            const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
            const auto * options     = operatorPtr->builtin_options.AsResizeBilinearOptions();

            desc.m_AlignCorners = options->align_corners;
            break;
        }
        case ResizeMethod::NearestNeighbor:
        {
            // NOTE(review): align_corners is only read on the Bilinear path;
            // the NearestNeighbor options are not consulted here - confirm
            // whether that is intentional.
            layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
            break;
        }
        default:
        {
            throw ParseException(
                fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
                            static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
        }
    }

    TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    // Resize must not change quantization parameters between input and output.
    CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");

    IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Only the image tensor (input 0) is registered; the size tensor is const.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2257
/// Converts a TfLite CONCATENATION operator into an ArmNN Concat layer,
/// followed by a fused activation layer when the operator requests one.
void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
    uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();

    // Normalise the axis: adding the rank before taking the modulus maps a
    // negative TfLite axis onto the equivalent non-negative dimension index.
    const unsigned int concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

    // Running offset along the concat axis, advanced per input view.
    unsigned int mergeDimOrigin = 0;

    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);

        // This set up concatDescriptor view origin
        armnnUtils::ProcessConcatInputTensorInfo(
            inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);

    IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // All input tensors are registered; connections are made later.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});

    // add fused activation layer
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);

    // Outputs are registered against the (possibly fused) final layer.
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2307
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002308void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
2309{
2310 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2311
2312 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2313 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2314
2315 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2316
2317 FullyConnectedDescriptor desc;
2318 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002319 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002320
2321 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2322 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2323 CHECK_VALID_SIZE(outputs.size(), 1);
2324
2325 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2326
2327 // Fully Connected Layer accepts two dimensional weights input
2328 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2329 if (weightsDimension != 2)
2330 {
2331 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002332 fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
2333 "Node {}",
2334 weightsDimension,
2335 CHECK_LOCATION().AsString()));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002336 }
2337
Matteo Martincigh747ef822018-12-18 09:26:39 +00002338 auto filterTensorAndData = CreateConstTensor(inputs[1],
2339 filterTensorInfo,
2340 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002341 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01002342 auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002343
2344 if (inputs.size() == 3)
2345 {
2346 desc.m_BiasEnabled = true;
2347 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002348 auto biasTensorAndData = CreateConstTensor(inputs[2],
2349 biasTensorInfo,
2350 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002351 layer = m_Network->AddFullyConnectedLayer(desc,
2352 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002353 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002354 layerName.c_str());
2355 }
2356 else
2357 {
2358 layer = m_Network->AddFullyConnectedLayer(desc,
2359 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002360 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002361 layerName.c_str());
2362 }
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002363 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002364
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002365 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2366
2367 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2368
2369 if (inputTensorInfo.GetNumDimensions() > 2)
2370 {
2371 // Add reshape to flatten to 2D [batch_size, input_size],
2372 // where "input_size" corresponds to the number of inputs to the layer,
2373 // matching the second dimension of weights,
2374 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2375 std::vector<unsigned int> reshapedDimensions(2);
2376 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2377 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2378
2379 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2380 {
2381 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002382 fmt::format("Failed to deduce input tensor shape from filter size {} {}",
2383 reshapedDimensions[1],
2384 CHECK_LOCATION().AsString()));
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002385 }
2386
2387 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2388 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2389
James Ward58dec6b2020-09-11 17:32:44 +01002390 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002391 armnn::ReshapeDescriptor desc;
2392 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2393 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2394
2395 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2396 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2397
2398 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2399 }
2400 else
2401 {
2402 // register the input connection slot for the layer
2403 // only the tensors for the inputs are relevant, exclude the const tensors
2404 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2405 }
2406
Sadik Armagand109a4d2020-07-28 10:42:13 +01002407 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002408 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2409
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002410 // we need to add the activation layer and fortunately we don't need to care about the data layout
2411 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2412 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002413
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002414 // register the output connection slots for the layer, connections are made after all layers have been created
2415 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2416 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2417}
2418
/// Parses the TFLite_Detection_PostProcess custom operator.
/// Its parameters arrive as a flexbuffers map in the operator's custom_options rather
/// than as builtin options, and its four output shapes are not stored in the model, so
/// they are derived here from max_detections/max_classes_per_detection.
void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    // NOTE(review): only the output count is validated here; inputs are presumably
    // box encodings, class scores and anchors (3 tensors) — confirm against the op spec.
    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two keys are optional in the flexbuffer map; the descriptor's defaults are
    // kept when they are absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    // IoU threshold must lie in (0, 1] for NMS to be meaningful.
    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // inputs[2] holds the anchors; they are constant and become part of the layer itself.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
                                                                       layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });   // detection boxes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });      // detection classes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });      // detection scores
    m_OverridenOutputShapes.push_back({ 1 });                      // number of detections

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                             outputTensorIndexes[1],
                                                             outputTensorIndexes[2],
                                                             outputTensorIndexes[3]});
}
2496
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002497/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2498void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2499{
2500 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2501
2502 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2503 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2504 CHECK_VALID_SIZE(outputs.size(), 1);
2505
2506 if (inputs.size() < 1)
2507 {
2508 throw ParseException("Pack must have at least one input.");
2509 }
2510
2511 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2512 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2513
2514 StackDescriptor desc;
2515 desc.m_Axis = static_cast<uint32_t>(options->axis);
2516 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2517
2518 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2519 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2520 desc.m_InputShape = inputTensorInfo.GetShape();
2521
James Ward58dec6b2020-09-11 17:32:44 +01002522 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002523 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2524
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002525 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002526
Sadik Armagand109a4d2020-07-28 10:42:13 +01002527 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002528 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2529
2530 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2531 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2532
2533 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2534 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2535}
2536
Nina Drozd200e3802019-04-15 09:47:39 +01002537void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2538{
2539 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2540
2541 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2542 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2543
2544 // This unpackAxis indicates the axis to unpack
2545 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2546
2547 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2548 CHECK_VALID_SIZE(inputs.size(), 1);
2549
2550 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002551
2552 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2553 {
2554 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002555 fmt::format("The unpack axis: {} cannot be greater than or equal to "
2556 "the number of input dimension {} {}",
2557 unpackAxis,
2558 inputTensorInfo.GetNumDimensions(),
2559 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002560 }
2561
Nina Drozd200e3802019-04-15 09:47:39 +01002562 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2563 // If num is not defined, automatically infer from the length of the dimension axis.
2564 if(unpackNum == 0)
2565 {
2566 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2567 }
2568
2569 // If unpack number cannot be inferred and is still zero, throw ParseException.
2570 if(unpackNum == 0)
2571 {
2572 throw ParseException("Number to unpack must greater than zero.");
2573 }
2574
2575 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2576 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2577
2578 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2579 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2580
2581 // Add current input shape to unpackDimSizes
2582 for (unsigned int i = 0; i < inputDimSize; ++i)
2583 {
2584 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2585 }
2586
2587 if (unpackDimSizes[unpackAxis] != unpackNum)
2588 {
2589 throw ParseException("Number to unpack must be the same as length of the dimension to "
2590 "unpack along.");
2591 }
2592
2593 unpackDimSizes[unpackAxis] /= unpackNum;
2594
2595 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2596 for (unsigned int j = 0; j < unpackNum; ++j)
2597 {
2598 // Set the size of the views.
2599 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2600 {
2601 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2602 }
2603 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2604 }
2605
James Ward58dec6b2020-09-11 17:32:44 +01002606 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01002607 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002608 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01002609
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002610 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2611 unpackDimSizes.data());
2612
Nina Drozd200e3802019-04-15 09:47:39 +01002613 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2614 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2615
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002616 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2617 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2618 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002619 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01002620 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002621 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002622 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002623 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2624
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002625 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2626 outputTensorInfo.GetDataType(),
2627 outputTensorInfo.GetQuantizationScale(),
2628 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002629 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2630
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002631 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002632
2633 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2634 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2635 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2636 }
Nina Drozd200e3802019-04-15 09:47:39 +01002637}
2638
/// Parses the TfLite Split operator into an ArmNN Splitter layer.
/// TfLite input layout: inputs[0] is the (constant) split axis, inputs[1] is the tensor
/// to split; num_splits comes from the builtin options.
void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSplitOptions();

    const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);

    // If number of splits cannot be inferred and is zero, throw ParseException.
    if(numSplits == 0)
    {
        throw ParseException("Number to splits must greater than zero.");
    }

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), numSplits);

    // Note the ordering: inputs[1] is the data tensor, inputs[0] the axis scalar.
    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);

    // Read the constant axis from the model buffer.
    // NOTE(review): the copy happens before the single-element assert below, and the
    // raw value is used unsigned — negative (wrapped) axes are not supported here,
    // unlike ParseSplitV which goes through ComputeWrappedIndex.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
    std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());

    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
    const unsigned int splitDim = axisData[0];

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        throw ParseException(
            fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
                        inputTensorInfo.GetNumDimensions(),
                        MaxNumOfTensorDimensions,
                        CHECK_LOCATION().AsString()));
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    // Split only supports equal-sized pieces, so the axis length must divide evenly.
    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        throw ParseException("Number of splits must evenly divide the dimension");
    }
    splitterDimSizes[splitDim] /= numSplits;

    // Every view has the same shape; views differ only in their origin along splitDim.
    SplitterDescriptor splitDesc(numSplits, inputDimSize);
    for (unsigned int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

    auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Only the data tensor (index 1) is a real network input; the axis is constant.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
        layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2720
Derek Lambertif0176992020-04-28 13:37:49 +01002721unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
2722{
2723 int numDims = armnn::numeric_cast<int>(numDimsIn);
2724 int v = idx < 0 ? numDims + idx : idx;
2725 ARMNN_ASSERT(v >= 0);
2726 ARMNN_ASSERT(v < numDims);
2727
2728 return static_cast<unsigned int>(v);
2729}
2730
/// Parses the TfLite SplitV operator into an ArmNN Splitter layer.
/// Inputs: [0] the tensor to split, [1] a 1-D tensor of split sizes (one entry may be
/// -1, meaning "infer from the remainder"), [2] the scalar split axis.
void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsSplitVOptions();

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 3);

    auto& inputTensor = inputs[0];
    auto& splitsTensor = inputs[1];
    auto& axisTensor = inputs[2];

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
    armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
    armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);

    // Inputs
    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        throw ParseException(
            fmt::format("The number of dimensions: {} for input tensors of the "
                        "SplitV op cannot be greater than {} {}",
                        inputTensorInfo.GetNumDimensions(),
                        MaxNumOfTensorDimensions,
                        CHECK_LOCATION().AsString()));
    }

    // Get split axis; negative values wrap via ComputeWrappedIndex.
    BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
    std::vector<int> axisData(axisTensorInfo.GetNumElements());
    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
    const unsigned int splitDim = ComputeWrappedIndex(axisData[0], inputTensorInfo.GetNumDimensions());

    // Set split sizes
    CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
    unsigned int numSplits{0};

    // num_splits may be absent from the builtin options; fall back to the length of the
    // split-sizes tensor.
    if(options)
    {
        numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
    }
    else
    {
        numSplits = splitsInfo.GetNumElements();
    }

    if (numSplits <=0)
    {
        throw ParseException("SplitV has invalid number of splits");
    }

    // Read the constant split sizes from the model buffer.
    std::vector<int> splitsData(numSplits);
    BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
    ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());

    // One pass over the sizes: sum the explicit ones and remember the position of a
    // negative ("infer me") entry, if any.
    unsigned int idx = 0;
    int numInferred{0};
    unsigned int inferIdx{0};
    int splitSum{0};
    for (auto split : splitsData)
    {
        if (split < 0)
        {
            numInferred++;
            inferIdx = idx;
        }
        else
        {
            splitSum += split;
        }
        idx++;
    }
    // Check for inferred Axis
    if (numInferred == 0)
    {
        // All sizes explicit: they must exactly cover the split dimension.
        if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
        {
            throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
        }
    }
    else if (numInferred == 1)
    {
        // Exactly one size is inferred as the remainder of the split dimension.
        splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
    }
    else
    {
        throw ParseException("Cannot infer split size for more than one split");
    }

    // Output size validation
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), numSplits);

    // Setup Armnn descriptor
    // Each view matches the input shape except along splitDim, where it takes its own
    // split size; origins are the running sum of the preceding sizes.
    SplitterDescriptor splitDesc(numSplits, inputDimSize);
    unsigned int accumSplit = 0;
    for (unsigned int j = 0; j < numSplits; ++j)
    {
        unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);

        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
        {
            unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
            if (dimIdx == splitDim)
            {
                dimSize = splitSize;
            }
            splitDesc.SetViewSize(j, dimIdx, dimSize);
        }

        splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
        accumSplit += splitSize;
    }

    auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    // Only the data tensor is a real network input; sizes and axis are constants.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
        layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
    }

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
}
2866
Inki Daed4619e22020-09-10 15:33:54 +09002867void TfLiteParser::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
2868{
2869 const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2870 const auto *options = operatorPtr->builtin_options.AsArgMaxOptions();
2871
2872 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2873 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2874 CHECK_VALID_SIZE(inputs.size(), 2);
2875
2876 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2877 CHECK_VALID_SIZE(outputs.size(), 1);
2878
James Ward58dec6b2020-09-11 17:32:44 +01002879 auto layerName = fmt::format("ArgMax:{}:{}", subgraphIndex, operatorIndex);
Inki Daed4619e22020-09-10 15:33:54 +09002880
2881 armnn::TensorInfo sizeTensorInfo0 = ToTensorInfo(inputs[0]);
2882 armnn::TensorInfo sizeTensorInfo1 = ToTensorInfo(inputs[1]);
2883
2884 // Get const axis value from model and set it to descriptor.
2885 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2886
2887 ArgMinMaxDescriptor desc;
2888 desc.m_Axis = axisBufferPtr->data.data()[0];
2889 // If output_type is int32 then set Signed32 else Signed64. Default type is Signed64.
2890 desc.m_Output_Type = options->output_type == 3 ? armnn::DataType::Signed32 : armnn::DataType::Signed64;
2891 desc.m_Function = ArgMinMaxFunction::Max;
2892
2893 // Register a ArgMax layer.
2894 IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerName.c_str());
2895
2896 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2897 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2898
2899 // Register input tensor to the layer.
2900 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2901 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2902
2903 // Register output tensor to the layer.
2904 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2905 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2906}
2907
Sadik Armagan26868492021-01-22 14:25:31 +00002908void TfLiteParser::ParseGather(size_t subgraphIndex, size_t operatorIndex)
2909{
2910 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2911
2912 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2913 CHECK_VALID_SIZE(inputs.size(), 2);
2914 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2915 CHECK_VALID_SIZE(outputs.size(), 1);
2916
2917 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2918 armnn::TensorInfo indicesTensorInfo = ToTensorInfo(inputs[1]);
2919 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
2920
2921 armnn::GatherDescriptor gatherDescriptor;
2922
2923 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2924 const auto * options = operatorPtr->builtin_options.AsGatherOptions();
2925 auto axis = options->axis;
2926
2927 auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
2928 auto indicesDimensions = indicesTensorInfo.GetNumDimensions();
2929 auto outputDimensions = outputTensorInfo.GetNumDimensions();
2930 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
2931 {
2932 throw ParseException(
2933 fmt::format("Operation has invalid axis: {} It is out of bounds [ -{}, {} ) {}",
2934 axis,
2935 inputDimensions, inputDimensions,
2936 CHECK_LOCATION().AsString()));
2937 }
2938 if (outputDimensions != static_cast<unsigned int>(inputDimensions) + indicesDimensions - 1)
2939 {
2940 throw ParseException(
2941 fmt::format("Operation has invalid output dimensions: {} Output must be an ({} + {} - 1) -D tensor {}",
2942 outputDimensions,
2943 inputDimensions, indicesDimensions,
2944 CHECK_LOCATION().AsString()));
2945 }
2946
2947 gatherDescriptor.m_Axis = axis;
2948
2949 auto layerName = fmt::format("Gather:{}:{}", subgraphIndex, operatorIndex);
2950 IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
2951 ARMNN_ASSERT(layer != nullptr);
2952 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2953
2954 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2955 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2956
2957 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2958 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2959}
2960
2961void TfLiteParser::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
2962{
2963 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2964
2965 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2966 CHECK_VALID_SIZE(inputs.size(), 1);
2967 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2968 CHECK_VALID_SIZE(outputs.size(), 1);
2969
2970 armnn::DepthToSpaceDescriptor descriptor;
2971
2972 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2973 const auto * options = operatorPtr->builtin_options.AsDepthToSpaceOptions();
2974 auto blockSize = options->block_size;
2975 if (blockSize < 2)
2976 {
2977 throw ParseException(
2978 fmt::format("Operation has invalid block size: {} Block size should be >= 2 {}",
2979 blockSize,
2980 CHECK_LOCATION().AsString()));
2981 }
2982 descriptor.m_BlockSize = armnn::numeric_cast<uint32_t>(blockSize);
2983
2984 auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
2985 IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
2986 ARMNN_ASSERT(layer != nullptr);
2987 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
2988 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2989
2990 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2991 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2992
2993 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2994 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2995}
2996
Sadik Armagan58f39192018-09-17 14:14:39 +01002997armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2998 unsigned int outputSlot,
2999 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01003000{
3001 ActivationDescriptor activationDesc;
3002 std::string layerName = prevLayer->GetName();
3003
3004 switch(activationType)
3005 {
3006 case tflite::ActivationFunctionType_NONE:
3007 {
3008 // this is a no-op: return previous layer
3009 return prevLayer;
3010 }
3011 case tflite::ActivationFunctionType_RELU:
3012 {
3013 activationDesc.m_Function = ActivationFunction::ReLu;
3014 layerName += ":RELU";
3015 break;
3016 }
3017 case tflite::ActivationFunctionType_RELU6:
3018 {
3019 activationDesc.m_Function = ActivationFunction::BoundedReLu;
3020 activationDesc.m_A = 6.0f;
3021 activationDesc.m_B = 0.0f;
3022 layerName += ":RELU6";
3023 break;
3024 }
3025 case tflite::ActivationFunctionType_TANH:
3026 {
3027 activationDesc.m_Function = ActivationFunction::TanH;
3028 activationDesc.m_A = 1.0f;
3029 activationDesc.m_B = 1.0f;
3030 layerName += ":TANH";
3031 break;
3032 }
3033
3034 // I only put these here as a reminder what others we could support
3035 case tflite::ActivationFunctionType_RELU_N1_TO_1:
3036 case tflite::ActivationFunctionType_SIGN_BIT:
3037 default:
3038 {
3039 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003040 fmt::format("TfLite parser doesn't suppport fused activation: "
3041 "{}/{} {} ",
3042 activationType,
3043 tflite::EnumNameActivationFunctionType(activationType),
3044 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003045
3046 }
3047 }
3048
3049 IConnectableLayer* activationLayer =
3050 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
3051
3052 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
3053 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
3054 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
3055 return activationLayer;
3056}
3057
3058TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
3059{
3060 if (fileName == nullptr)
3061 {
James Ward58dec6b2020-09-11 17:32:44 +01003062 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003063 CHECK_LOCATION().AsString()));
3064 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01003065 std::error_code errorCode;
3066 fs::path pathToFile(fileName);
3067 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01003068 {
James Ward58dec6b2020-09-11 17:32:44 +01003069 //fmt::format() could not be used here (format error)
3070 std::stringstream msg;
3071 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
3072 << " " << CHECK_LOCATION().AsString();
3073
3074 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01003075 }
3076 std::ifstream file(fileName, std::ios::binary);
3077 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3078 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3079 fileContent.size());
3080}
3081
3082TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
3083{
3084 if (binaryContent == nullptr)
3085 {
James Ward58dec6b2020-09-11 17:32:44 +01003086 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01003087 CHECK_LOCATION().AsString()));
3088 }
3089 flatbuffers::Verifier verifier(binaryContent, len);
3090 if (verifier.VerifyBuffer<tflite::Model>() == false)
3091 {
3092 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003093 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
3094 "flatbuffers format. size:{} {}",
3095 len,
3096 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003097 }
3098 return tflite::UnPackModel(binaryContent);
3099}
3100
3101TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
3102 size_t subgraphIndex,
3103 size_t operatorIndex)
3104{
3105 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3106
Derek Lambertiff05cc52019-04-26 13:05:17 +01003107 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3108 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003109
3110 size_t inputCount = operatorPtr->inputs.size();
3111 TensorRawPtrVector result(inputCount);
3112 for (size_t i=0; i<inputCount; ++i)
3113 {
3114 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003115 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003116 }
3117 return result;
3118}
3119
3120TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
3121 size_t subgraphIndex,
3122 size_t operatorIndex)
3123{
3124 CHECK_MODEL(model, subgraphIndex, operatorIndex);
3125
Derek Lambertiff05cc52019-04-26 13:05:17 +01003126 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3127 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003128
3129 size_t outputCount = operatorPtr->outputs.size();
3130 TensorRawPtrVector result(outputCount);
3131 for (size_t i=0; i<outputCount; ++i)
3132 {
3133 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
3134 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003135 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003136 }
3137 return result;
3138}
3139
3140TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
3141 size_t subgraphIndex)
3142{
3143 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003144 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003145
Derek Lambertiff05cc52019-04-26 13:05:17 +01003146 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003147 TensorIdRawPtrVector result(inputCount);
3148 for (size_t i=0; i<inputCount; ++i)
3149 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003150 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01003151 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003152 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003153 }
3154 return result;
3155}
3156
3157TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
3158 size_t subgraphIndex)
3159{
3160 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003161 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003162
Derek Lambertiff05cc52019-04-26 13:05:17 +01003163 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003164 TensorIdRawPtrVector result(outputCount);
3165 for (size_t i=0; i<outputCount; ++i)
3166 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003167 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3168 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003169 }
3170 return result;
3171}
3172
3173std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
3174 size_t subgraphIndex,
3175 size_t operatorIndex)
3176{
3177 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003178 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3179 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003180 return operatorPtr->inputs;
3181}
3182
3183std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
3184 size_t subgraphIndex,
3185 size_t operatorIndex)
3186{
3187 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003188 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3189 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003190 return operatorPtr->outputs;
3191}
3192
3193void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
3194 size_t operatorIndex,
3195 IConnectableLayer* layer,
3196 const std::vector<unsigned int>& tensorIndexes)
3197{
3198 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003199 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003200 if (tensorIndexes.size() != layer->GetNumInputSlots())
3201 {
3202 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003203 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
3204 " for subgraph:{} operator index:{} {}",
3205 tensorIndexes.size(),
3206 layer->GetNumInputSlots(),
3207 subgraphIndex,
3208 operatorIndex,
3209 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003210 }
3211
3212 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
3213 {
3214 unsigned int tensorIndex = tensorIndexes[slotIndex];
3215 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
3216 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
3217 }
3218}
3219
3220void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
3221 size_t operatorIndex,
3222 IConnectableLayer* layer,
3223 const std::vector<unsigned int>& tensorIndexes)
3224{
3225 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003226 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003227 if (tensorIndexes.size() != layer->GetNumOutputSlots())
3228 {
3229 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003230 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
3231 " for subgraph:{} operator index:{} {}",
3232 tensorIndexes.size(),
3233 layer->GetNumOutputSlots(),
3234 subgraphIndex,
3235 operatorIndex,
3236 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003237 }
3238
3239 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
3240 {
3241 unsigned int tensorIndex = tensorIndexes[slotIndex];
3242 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
3243 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3244 }
3245}
3246
3247void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
3248{
3249 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3250
3251 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
3252 for (auto const & tensorIdAndPtr : inputs)
3253 {
3254 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3255 IConnectableLayer* layer =
3256 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3257
3258 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
3259 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3260
3261 RegisterOutputSlots(subgraphIndex,
3262 VIRTUAL_OPERATOR_ID,
3263 layer,
3264 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3265 }
3266}
3267
3268void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
3269{
3270 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3271
3272 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
3273 for (auto const & tensorIdAndPtr : outputs)
3274 {
3275 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3276 IConnectableLayer* layer =
3277 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3278
3279 RegisterInputSlots(subgraphIndex,
3280 VIRTUAL_OPERATOR_ID,
3281 layer,
3282 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3283 }
3284}
3285
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003286void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
3287{
3288 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3289
Derek Lambertiff05cc52019-04-26 13:05:17 +01003290 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003291 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
3292 {
3293 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
3294 {
3295 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
3296 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
3297 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003298 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003299 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
3300 auto tensorAndData = CreateConstTensor(tensorPtr,
3301 tensorInfo,
3302 armnn::Optional<armnn::PermutationVector&>());
3303
James Ward58dec6b2020-09-11 17:32:44 +01003304 std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003305 IConnectableLayer *layer =
3306 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
3307
3308 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3309 RegisterOutputSlots(subgraphIndex,
3310 VIRTUAL_OPERATOR_ID,
3311 layer,
3312 { tensorIndex });
3313
3314 }
3315 }
3316 }
3317}
3318
telsoa01c577f2c2018-08-31 09:22:23 +01003319// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
3320TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
3321{
3322 CHECK_BUFFER(model, bufferIndex);
3323 return model->buffers[bufferIndex].get();
3324}
3325
Matteo Martincigh747ef822018-12-18 09:26:39 +00003326template<typename T>
3327std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
3328TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
3329 TfLiteParser::TensorRawPtr tensorPtr,
3330 armnn::TensorInfo& tensorInfo,
3331 armnn::Optional<armnn::PermutationVector&> permutationVector)
3332{
3333 auto constData = CreateConstTensorImpl<T>(bufferPtr,
3334 tensorPtr,
3335 tensorInfo,
3336 permutationVector);
3337 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
3338 return std::make_pair(constData.first, std::move(storage));
3339}
3340
telsoa01c577f2c2018-08-31 09:22:23 +01003341std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
3342TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00003343 armnn::TensorInfo& tensorInfo,
3344 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01003345{
3346 CHECK_TENSOR_PTR(tensorPtr);
3347 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3348 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3349
3350 switch (tensorInfo.GetDataType())
3351 {
3352 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003353 return CreateConstTensorAndStoreData<float>(bufferPtr,
3354 tensorPtr,
3355 tensorInfo,
3356 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00003357 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003358 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
3359 tensorPtr,
3360 tensorInfo,
3361 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00003362 case armnn::DataType::QSymmS8:
3363 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3364 tensorPtr,
3365 tensorInfo,
3366 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00003367 case armnn::DataType::QAsymmS8:
3368 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3369 tensorPtr,
3370 tensorInfo,
3371 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003372 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003373 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
3374 tensorPtr,
3375 tensorInfo,
3376 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003377 default:
3378 {
3379 std::stringstream errString;
3380 errString << "Unexpected datatype when creating const tensor: "
3381 << armnn::GetDataTypeName(tensorInfo.GetDataType())
3382 << " shape:" << tensorInfo.GetShape()
3383 << CHECK_LOCATION().AsString();
3384 throw ParseException(errString.str());
3385 }
3386 }
3387}
3388
3389BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
3390 const std::string& name) const
3391{
3392 CHECK_SUBGRAPH(m_Model, subgraphId);
3393 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3394 for (auto const & input : inputs)
3395 {
3396 if (input.second->name == name)
3397 {
3398 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3399 return std::make_pair(bindingId, ToTensorInfo(input.second));
3400 }
3401 }
3402
3403 std::stringstream bindings;
3404 for (auto const & input : inputs)
3405 {
3406 bindings << "'" << input.second->name << "' ";
3407 }
3408
3409 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003410 fmt::format("No input binding found for subgraph:{} and name:{}. "
3411 "Possible inputs are: [{}] {}",
3412 subgraphId,
3413 name,
3414 bindings.str(),
3415 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003416}
3417
3418BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
3419 const std::string& name) const
3420{
3421 CHECK_SUBGRAPH(m_Model, subgraphId);
3422 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003423 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01003424 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003425 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01003426 if (output.second->name == name)
3427 {
3428 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003429 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3430 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3431 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01003432 }
3433 }
3434
3435 std::stringstream bindings;
3436 for (auto const & output : outputs)
3437 {
3438 bindings << "'" << output.second->name << "' ";
3439 }
3440
3441 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003442 fmt::format("No output binding found for subgraph:{} and name:{}. "
3443 "Possible outputs are: [{}] {}",
3444 subgraphId,
3445 name,
3446 bindings.str(),
3447 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003448}
3449
3450size_t TfLiteParser::GetSubgraphCount() const
3451{
3452 return m_Model->subgraphs.size();
3453}
3454
3455std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
3456{
3457 CHECK_SUBGRAPH(m_Model, subgraphId);
3458 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3459 std::vector<std::string> result;
3460 result.reserve(inputs.size());
3461 for (auto const & input : inputs)
3462 {
3463 result.push_back(input.second->name);
3464 }
3465 return result;
3466}
3467
3468std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
3469{
3470 CHECK_SUBGRAPH(m_Model, subgraphId);
3471 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3472 std::vector<std::string> result;
3473 result.reserve(outputs.size());
3474 for (auto const & output : outputs)
3475 {
3476 result.push_back(output.second->name);
3477 }
3478 return result;
3479}
3480
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003481ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003482{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003483 return new TfLiteParser(options);
telsoa01c577f2c2018-08-31 09:22:23 +01003484}
3485
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003486ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003487{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003488 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
telsoa01c577f2c2018-08-31 09:22:23 +01003489}
3490
3491void ITfLiteParser::Destroy(ITfLiteParser* parser)
3492{
3493 delete parser;
3494}
3495
3496TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
3497: m_FloatData(std::move(data))
3498, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003499, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003500, m_Int32Data(nullptr)
3501{
3502}
3503
3504TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
3505: m_FloatData(nullptr)
3506, m_Uint8Data(std::move(data))
Keith Davisd305e1a2020-01-22 11:57:54 +00003507, m_Int8Data(nullptr)
3508, m_Int32Data(nullptr)
3509{
3510}
3511
3512TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
3513: m_FloatData(nullptr)
3514, m_Uint8Data(nullptr)
3515, m_Int8Data(std::move(data))
telsoa01c577f2c2018-08-31 09:22:23 +01003516, m_Int32Data(nullptr)
3517{
3518}
3519
3520TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
3521: m_FloatData(nullptr)
3522, m_Uint8Data(nullptr)
Keith Davisd305e1a2020-01-22 11:57:54 +00003523, m_Int8Data(nullptr)
telsoa01c577f2c2018-08-31 09:22:23 +01003524, m_Int32Data(std::move(data))
3525{
3526}
3527
3528} // armnnTfLiteParser