blob: d1d45f5583e412284c59e43f9fda5866ea4ea440 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Mike Kellyc5789ca2020-07-06 19:24:15 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
Matteo Martincighe011d202019-11-28 11:35:47 +00005
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "TfLiteParser.hpp"
7
Sadik Armagand109a4d2020-07-28 10:42:13 +01008#include <armnn/BackendOptions.hpp>
Matthew Bentham39ef3e52020-01-20 10:09:09 +00009#include <armnn/Descriptors.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010010#include <armnn/Exceptions.hpp>
Derek Lamberti08446972019-11-26 16:38:31 +000011#include <armnn/Logging.hpp>
James Conroy05102392020-06-24 15:39:55 +010012#include <armnn/Tensor.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010013#include <armnn/TypesUtils.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010014#include <armnn/utility/Assert.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000015#include <armnn/utility/IgnoreUnused.hpp>
Derek Lambertif0176992020-04-28 13:37:49 +010016#include <armnn/utility/NumericCast.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010017
18// armnnUtils:
Matteo Martincighe011d202019-11-28 11:35:47 +000019#include <armnnUtils/Permute.hpp>
Francis Murtagh532a29d2020-06-29 11:50:01 +010020#include <Filesystem.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000021
Sadik Armagan479045b2018-10-01 11:51:37 +010022#include <ParserHelper.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010023#include <VerificationHelpers.hpp>
24
25// The generated code based on the Tf Lite schema:
26#include <schema_generated.h>
27
Matteo Martincighe011d202019-11-28 11:35:47 +000028#include <flatbuffers/flexbuffers.h>
29
James Ward58dec6b2020-09-11 17:32:44 +010030#include <fmt/format.h>
telsoa01c577f2c2018-08-31 09:22:23 +010031
32#include <fstream>
33#include <algorithm>
34#include <limits>
Sadikb94967b2018-09-19 15:30:00 +010035#include <numeric>
Derek Lambertic9e52792020-03-11 11:42:26 +000036#include <sstream>
37
// Throws an armnn::ParseException whose message is `msg` streamed into a
// temporary std::stringstream, suffixed with ": <file:line>" of the throw
// site via CHECK_LOCATION(). Braced so the macro behaves as one statement.
#define ARMNN_THROW_PARSE_EXCEPTION(msg) \
    { \
        throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \
            << ": " \
            << CHECK_LOCATION().AsString()).str()); \
    }
telsoa01c577f2c2018-08-31 09:22:23 +010044
45using namespace armnn;
46using armnn::CheckLocation;
47namespace armnnTfLiteParser
48{
49namespace
50{
jimfly01c25411c2018-11-14 17:47:22 +000051
telsoa01c577f2c2018-08-31 09:22:23 +010052const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
53
54void CheckSubgraph(const TfLiteParser::ModelPtr & model,
55 size_t subgraphIndex,
56 const CheckLocation & location)
57{
58 if (model.get() == nullptr)
59 {
60 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +010061 fmt::format("{} was called with invalid (null) model. "
62 "Possible reason is that the model is not yet loaded and Unpack(ed). "
63 "subgraph:{} at {}",
64 location.m_Function,
65 subgraphIndex,
66 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +010067 }
68 else if (subgraphIndex >= model->subgraphs.size())
69 {
70 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +010071 fmt::format("{} was called with an invalid subgraph index. "
72 "subgraph:{} at {}",
73 location.m_Function,
74 subgraphIndex,
75 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +010076 }
77}
78
79#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
80 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
81
82void CheckModel(const TfLiteParser::ModelPtr & model,
83 size_t subgraphIndex,
84 size_t operatorIndex,
85 const CheckLocation & location)
86{
87 if (model.get() == nullptr)
88 {
89 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +010090 fmt::format("{} was called with invalid (null) model. "
91 "Possible reason is that the model is not yet loaded and Unpack(ed). "
92 "subgraph:{} operator:{} at {}",
93 location.m_Function,
94 subgraphIndex,
95 operatorIndex,
96 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +010097 }
98 else if (subgraphIndex >= model->subgraphs.size())
99 {
100 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100101 fmt::format("{} was called with an invalid subgraph index. "
102 "subgraph:{} operator:{} at {}",
103 location.m_Function,
104 subgraphIndex,
105 operatorIndex,
106 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100107 }
108 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
109 operatorIndex != VIRTUAL_OPERATOR_ID)
110 {
111 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100112 fmt::format("{} was called with an invalid operator index. "
113 "subgraph:{} operator:{} at {}",
114 location.m_Function,
115 subgraphIndex,
116 operatorIndex,
117 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100118 }
119}
120
121#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
122 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
123
124void CheckTensor(const TfLiteParser::ModelPtr & model,
125 size_t subgraphIndex,
126 size_t tensorIndex,
127 const CheckLocation & location)
128{
129 // not checking model, because I assume CHECK_MODEL already run
130 // and checked that. An assert would do.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100131 ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
telsoa01c577f2c2018-08-31 09:22:23 +0100132
133 // also subgraph index should be checked by CHECK_MODEL so
134 // I only add an assert here
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100135 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
telsoa01c577f2c2018-08-31 09:22:23 +0100136
137 // the tensor index is the only one to check here
138 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
139 {
140 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100141 fmt::format("{} was called with an invalid tensor index. "
142 "subgraph:{} tensor:{} at {}",
143 location.m_Function,
144 subgraphIndex,
145 tensorIndex,
146 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100147 }
148}
149
150#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
151 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
152
153void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
154 const CheckLocation & location)
155{
156 if (rawPtr == nullptr)
157 {
158 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100159 fmt::format("{} was called with a null tensor pointer at {}", location.m_Function, location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100160 }
161}
162
163#define CHECK_TENSOR_PTR(TENSOR_PTR) \
164 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
165
166void CheckBuffer(const TfLiteParser::ModelPtr & model,
167 size_t bufferIndex,
168 const CheckLocation & location)
169{
170 if (model.get() == nullptr)
171 {
172 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100173 fmt::format("{} was called with invalid (null) model. "
174 "Possible reason is that the model is not yet loaded and Unpack(ed). "
175 "buffer:{} at {}",
176 location.m_Function,
177 bufferIndex,
178 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100179 }
180 else if (bufferIndex >= model->buffers.size())
181 {
182 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100183 fmt::format("{} was called with an invalid buffer index. "
184 "buffer index:{} at {}",
185 location.m_Function,
186 bufferIndex,
187 location.FileLine()));
telsoa01c577f2c2018-08-31 09:22:23 +0100188 }
189 else if (model->buffers[bufferIndex].get() == nullptr)
190 {
191 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100192 fmt::format("The buffer #{} is null. {}",
193 bufferIndex,
194 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100195 }
196}
197
198#define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
199 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
200
201void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
202 const armnn::TensorInfo & tensorInfo,
203 uint32_t bufferId,
204 const CheckLocation & location)
205{
206 if (bufferPtr == nullptr)
207 {
208 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100209 fmt::format("BufferPtr is null for buffer:{}. {}",
210 bufferId,
211 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100212 }
213 else if(tensorInfo.GetNumElements() > bufferPtr->data.size() ||
214 tensorInfo.GetNumBytes() > bufferPtr->data.size())
215 {
216 std::stringstream ss;
217 ss << "Buffer #" << bufferId << " has " << bufferPtr->data.size() << " bytes. "
218 << "For tensor: " << tensorInfo.GetShape()
219 << " expecting: " << tensorInfo.GetNumBytes() << " bytes and "
220 << tensorInfo.GetNumElements() << " elements. " << location.AsString();
221 throw ParseException(ss.str());
222 }
223}
224
225#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \
226 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION())
227
228bool IsActivationSupported(tflite::ActivationFunctionType activationType)
229{
230 switch(activationType)
231 {
232 case tflite::ActivationFunctionType_NONE:
233 case tflite::ActivationFunctionType_RELU:
234 case tflite::ActivationFunctionType_RELU6:
235 case tflite::ActivationFunctionType_TANH:
236 {
237 return true;
238 }
239 default:
240 {
241 return false;
242 }
243 }
244}
245
246#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \
247 do { \
248 if (IsActivationSupported(OPTION->fused_activation_function) == false) \
249 { \
250 throw ParseException( \
James Ward58dec6b2020-09-11 17:32:44 +0100251 fmt::format("TfLite parser doesn't suppport fused activation: " \
252 "{}/{} in {} subgraph:{} operator:{} at {}", \
253 OPTION->fused_activation_function, \
254 tflite::EnumNameActivationFunctionType(\
255 OPTION->fused_activation_function), \
256 __func__, \
257 SUBGRAPH_INDEX, \
258 OPERATOR_INDEX, \
259 CHECK_LOCATION().FileLine())); \
telsoa01c577f2c2018-08-31 09:22:23 +0100260 } \
261 } while(false)
262
263
264std::vector<unsigned int> AsUnsignedVector(const std::vector<int32_t> & in)
265{
266 std::vector<unsigned int> result;
267 result.reserve(in.size());
268 for (auto & i : in)
269 {
270 result.push_back(CHECKED_NON_NEGATIVE(i));
271 }
272 return result;
273}
274
275void CalcPadding(uint32_t inputSize,
276 uint32_t filterSize,
277 uint32_t stride,
Pablo Tellof0bd6832019-04-26 17:58:13 +0100278 uint32_t dilation,
telsoa01c577f2c2018-08-31 09:22:23 +0100279 uint32_t& paddingFront,
280 uint32_t& paddingBack,
281 tflite::Padding padding)
282{
283 paddingFront = 0;
284 paddingBack = 0;
285 if (padding == tflite::Padding_SAME)
286 {
287 uint32_t outputSize = (inputSize + stride - 1) / stride;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100288 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
289 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
telsoa01c577f2c2018-08-31 09:22:23 +0100290 if (temp > inputSize)
291 {
292 paddingFront = (temp - inputSize) / 2;
293 paddingBack = (temp - inputSize) - paddingFront;
294 }
295 }
296}
297
Sadik Armagand109a4d2020-07-28 10:42:13 +0100298armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
299 const std::vector<unsigned int>& shapes,
300 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3},
301 const bool outputTensor = false)
telsoa01c577f2c2018-08-31 09:22:23 +0100302{
303 armnn::DataType type;
304 CHECK_TENSOR_PTR(tensorPtr);
305
306 switch (tensorPtr->type)
307 {
308 case tflite::TensorType_UINT8:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000309 type = armnn::DataType::QAsymmU8;
telsoa01c577f2c2018-08-31 09:22:23 +0100310 break;
311 case tflite::TensorType_FLOAT32:
312 type = armnn::DataType::Float32;
313 break;
Finn Williamsed66d142019-12-06 09:55:55 +0000314 case tflite::TensorType_INT8:
Keith Davis67e6c542020-02-19 10:08:33 +0000315 if (tensorPtr->quantization->zero_point.size() == 1)
Ryan OShea03181ff2020-02-07 17:22:22 +0000316 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000317 // Per-tensor
Ryan OShea03181ff2020-02-07 17:22:22 +0000318 type = armnn::DataType::QAsymmS8;
319 }
320 else
321 {
Keith Davis0c2eeac2020-02-11 16:51:50 +0000322 // Per-channel
Ryan OShea03181ff2020-02-07 17:22:22 +0000323 type = armnn::DataType::QSymmS8;
324 }
Finn Williamsed66d142019-12-06 09:55:55 +0000325 break;
326 case tflite::TensorType_INT16:
Derek Lambertif90c56d2020-01-10 17:14:08 +0000327 type = armnn::DataType::QSymmS16;
Finn Williamsed66d142019-12-06 09:55:55 +0000328 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100329 case tflite::TensorType_INT32:
330 type = armnn::DataType::Signed32;
331 break;
Inki Daed4619e22020-09-10 15:33:54 +0900332 case tflite::TensorType_INT64:
333 type = armnn::DataType::Signed64;
334 break;
telsoa01c577f2c2018-08-31 09:22:23 +0100335 default:
336 {
337 CheckLocation location = CHECK_LOCATION();
338 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100339 fmt::format("Unsupported data type {} = {} for tensor: {}. {}",
340 tensorPtr->type,
341 tflite::EnumNameTensorType(tensorPtr->type),
342 tensorPtr->name,
343 location.AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100344 }
345 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100346 std::vector<unsigned int> safeShape = shapes;
Sadik Armagand109a4d2020-07-28 10:42:13 +0100347 bool isDynamic = false;
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100348 if (safeShape.size() == 0)
349 {
350 safeShape.push_back(1);
Sadik Armagand109a4d2020-07-28 10:42:13 +0100351 if (outputTensor)
352 {
353 isDynamic = true;
354 }
Narumol Prangnawarat4818d462019-04-17 11:22:38 +0100355 }
356
Keith Davisd305e1a2020-01-22 11:57:54 +0000357 float quantizationScale = 0.0f;
358 int32_t quantizationOffset = 0;
359
360 if (tensorPtr->quantization.get())
361 {
362 if (tensorPtr->quantization->scale.size() <= 1)
363 {
364 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
365 CHECK_VALID_SIZE(tensorPtr->quantization->zero_point.size(), 0, 1);
366
367 if (tensorPtr->quantization->scale.size() == 1)
368 {
369 quantizationScale = tensorPtr->quantization->scale[0];
370 }
371 if (tensorPtr->quantization->zero_point.size() == 1)
372 {
373 // NOTE: we lose precision here when converting from 64 bit to 32
Ryan OShea03181ff2020-02-07 17:22:22 +0000374 // but this is what we support at the moment in ArmNN
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100375 quantizationOffset = armnn::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000376 }
377
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100378 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100379 safeShape.data());
380 if (isDynamic)
381 {
382 tensorShape = TensorShape(1, false);
383 }
384 armnn::TensorInfo result(tensorShape,
385 type,
386 quantizationScale,
387 quantizationOffset);
Keith Davisd305e1a2020-01-22 11:57:54 +0000388 return result;
389 }
390 else
391 {
392 std::vector<float> quantizationScales;
393 std::vector<int32_t> quantizationOffsets;
394
395 // Scale
396 std::copy(tensorPtr->quantization->scale.begin(),
397 tensorPtr->quantization->scale.end(),
398 std::back_inserter(quantizationScales));
399
Keith Davis0c2eeac2020-02-11 16:51:50 +0000400 // QSymmS8 Per-axis
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100401 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100402 safeShape.data());
403 if (isDynamic)
404 {
405 tensorShape = TensorShape(1, false);
406 }
407 armnn::TensorInfo result(tensorShape,
408 type,
409 quantizationScales,
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100410 dimensionMappings[armnn::numeric_cast<unsigned int>(
Sadik Armagand109a4d2020-07-28 10:42:13 +0100411 tensorPtr->quantization->quantized_dimension)]);
Keith Davisd305e1a2020-01-22 11:57:54 +0000412 return result;
413 }
414 }
415 else
416 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100417 TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
Sadik Armagand109a4d2020-07-28 10:42:13 +0100418 safeShape.data());
419 if (isDynamic)
420 {
421 tensorShape = TensorShape(1, false);
422 }
423 armnn::TensorInfo result(tensorShape,
Keith Davisd305e1a2020-01-22 11:57:54 +0000424 type,
425 quantizationScale,
426 quantizationOffset);
427 return result;
428 }
telsoa01c577f2c2018-08-31 09:22:23 +0100429}
430
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +0100431armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
Keith Davis0c2eeac2020-02-11 16:51:50 +0000432 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000433{
434 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
Keith Davis0c2eeac2020-02-11 16:51:50 +0000435 return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +0000436}
437
Sadik Armagand109a4d2020-07-28 10:42:13 +0100438armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
439 const bool outputTensor)
440{
441 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
442 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3};
443 return ToTensorInfo(tensorPtr, dimensions, dimensionMappings, outputTensor);
444}
445
/// Creates an armnn ConstTensor backed by a freshly allocated copy of the
/// flatbuffer buffer contents.
/// @tparam T element type of the tensor data.
/// @param bufferPtr         raw flatbuffer buffer holding the bytes (non-null).
/// @param tensorPtr         raw tensor, used only for the debug asserts here.
/// @param tensorInfo        in/out: updated in place when a permutation is applied.
/// @param permutationVector optional layout permutation (e.g. TfLite weight
///                          layout -> armnn layout); empty means plain copy.
/// @return the ConstTensor plus the owning buffer; the caller must keep the
///         unique_ptr alive for as long as the ConstTensor is in use.
template<typename T>
std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                      TfLiteParser::TensorRawPtr tensorPtr,
                      armnn::TensorInfo& tensorInfo,
                      armnn::Optional<armnn::PermutationVector&> permutationVector)
{
    // tensorPtr is only read by the asserts below; silence release-build warnings.
    IgnoreUnused(tensorPtr);
    ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
    ARMNN_ASSERT_MSG(bufferPtr != nullptr,
        fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());

    std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0)
    {
        // Permute both the TensorInfo (in place) and the element data into
        // the new buffer.
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        armnnUtils::Permute(tensorInfo.GetShape(), permutationVector.value(),
                            reinterpret_cast<const T*>(bufferPtr->data.data()), data.get(), sizeof(T));
    }
    else
    {
        // No permutation requested: byte-for-byte copy.
        ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.GetNumBytes());
    }

    return std::make_pair(ConstTensor(tensorInfo, data.get()), std::move(data));
}
473
telsoa01c577f2c2018-08-31 09:22:23 +0100474armnn::LayerBindingId GenerateLayerBindingId(size_t subgraphIndex, size_t tensorIndex)
475{
476 // generate the binding id by shifting the tensor id by 8 bit
477 // and add the subgraph id, which allows 256 subgraphs
478 return static_cast<armnn::LayerBindingId>((tensorIndex<<8)+subgraphIndex);
479}
480
Aron Virginas-Tar70672f62019-01-23 14:00:00 +0000481bool CheckShape(const armnn::TensorShape& actual, const std::vector<int32_t>& expected)
482{
483 const unsigned int actualSize = actual.GetNumDimensions();
484 if (actualSize != expected.size())
485 {
486 return false;
487 }
488
489 for (unsigned int i = 0u; i < actualSize; i++)
490 {
491 if (expected[i] < 0 ||
492 actual[i] != static_cast<unsigned int>(expected[i]))
493 {
494 return false;
495 }
496 }
497
498 return true;
499}
500
James Conroy05102392020-06-24 15:39:55 +0100501void CheckMatchingQuantization(const TensorInfo& first,
502 const TensorInfo& second,
503 const std::string& descName,
504 std::string const& firstName,
505 std::string const& secondName)
506{
507 if (!first.IsQuantized() ||
508 !second.IsQuantized())
509 {
510 // Not a quantized type, ignore the validation
511 return;
512 }
513
514 DataType firstDataType = first.GetDataType();
515 DataType secondDataType = second.GetDataType();
516
517 if (firstDataType != secondDataType)
518 {
519 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
520 " must be of the same quantized type, " +
521 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
522 secondName + " is " + GetDataTypeName(secondDataType));
523 }
524
525 if (!first.IsTypeSpaceMatch(second))
526 {
527 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
528 " must have the same quantization space, " +
529 firstName + " has offset " + std::to_string(first.GetQuantizationOffset()) +
530 " and scale " + std::to_string(first.GetQuantizationScale()) + ", " +
531 secondName + " has offset " + std::to_string(second.GetQuantizationOffset()) +
532 " and scale " + std::to_string(second.GetQuantizationScale()));
533 }
534}
535
telsoa01c577f2c2018-08-31 09:22:23 +0100536} // <anonymous>
537
/// Constructs the parser and fills the dispatch tables.
/// m_ParserFunctions is indexed by tflite::BuiltinOperator; every slot starts
/// as ParseUnsupportedOperator and supported builtins are overwritten below.
/// Custom (non-builtin) operators are dispatched by name via
/// m_CustomParserFunctions from ParseCustomOperator.
TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParser::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParser::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParser::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParser::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParser::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParser::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParser::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParser::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_EXP]                     = &TfLiteParser::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParser::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH]              = &TfLiteParser::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParser::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParser::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParser::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParser::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParser::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParser::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParser::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParser::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParser::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParser::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParser::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParser::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParser::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParser::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParser::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParser::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParser::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParser::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParser::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParser::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V]                 = &TfLiteParser::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParser::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParser::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParser::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParser::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParser::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParser::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParser::ParseUnpack;
    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParser::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX]                 = &TfLiteParser::ParseArgMax;
    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"]            = &TfLiteParser::ParseDetectionPostProcess;
}
589
590void TfLiteParser::ResetParser()
591{
592 m_Network = armnn::INetworkPtr(nullptr, nullptr);
593 m_Model = nullptr;
594 m_SubgraphConnections.clear();
595}
596
597INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
598{
599 ResetParser();
600 m_Model = LoadModelFromFile(graphFile);
601 return CreateNetworkFromModel();
602}
603
604INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
605{
606 ResetParser();
607 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
608 return CreateNetworkFromModel();
609}
610
/// Converts the already-loaded flatbuffer model (m_Model) into an armnn
/// network: runs each operator's parser function, sets up input/output/
/// constant layers per subgraph, then wires output slots to input slots.
/// @return ownership of the built INetwork (m_Network is moved out).
/// @throws ParseException if the model has more than one subgraph, an
///         operator code is out of range, or any operator parser fails.
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{

    using NetworkOptions = std::vector<BackendOptions>;
    NetworkOptions networkOptions = {};
    // Optionally enable shape inference + validation in the network itself.
    if (m_Options && m_Options.value().m_InferAndValidate)
    {
        BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                  {
                                                      { "InferAndValidate", true }
                                                  });

        networkOptions.push_back(shapeInferenceMethodOption);
    }

    m_Network = INetwork::Create(networkOptions);
    ARMNN_ASSERT(m_Model.get() != nullptr);

    // Current limitation: exactly one subgraph per model is supported.
    if (m_Model->subgraphs.size() != 1)
    {
        throw ParseException(
            fmt::format("Current TfLite parser only supports 1 subgraph. Current one has: {} {}",
                        m_Model->subgraphs.size(),
                        CHECK_LOCATION().AsString()));
    }

    size_t subgraphIndex = 0;
    size_t operatorIndex = 0;
    try
    {
        for (SubgraphPtr const& subgraph : m_Model->subgraphs)
        {
            // One connection-slot entry per tensor in this subgraph.
            m_SubgraphConnections.emplace_back(subgraph->tensors.size());
            for (OperatorPtr const& op : subgraph->operators)
            {
                auto const& opCodePtr = m_Model->operator_codes[op->opcode_index];
                auto builtinCode = opCodePtr->builtin_code;

                if (builtinCode > tflite::BuiltinOperator_MAX)
                {
                    throw ParseException(fmt::format("Operator code {} is out of range 0-{}. "
                                                     "subgraph:{} operator idx:{}. {}",
                                                     builtinCode, tflite::BuiltinOperator_MAX, subgraphIndex,
                                                     operatorIndex, CHECK_LOCATION().AsString()));
                }

                // lookup and call the parser function (defaults to
                // ParseUnsupportedOperator for unregistered builtins)
                auto& parserFunction = m_ParserFunctions[builtinCode];
                (this->*parserFunction)(subgraphIndex, operatorIndex);
                ++operatorIndex;
            }

            SetupInputLayers(subgraphIndex);
            SetupOutputLayers(subgraphIndex);
            SetupConstantLayers(subgraphIndex);

            ++subgraphIndex;
            operatorIndex = 0;
        }
    }
    catch (const ParseException& e)
    {
        // Re-throw with the failing operator/subgraph indices prepended,
        // after logging the same message.
        std::stringstream errorString;
        errorString << "Failed to parse operator #" << operatorIndex << " within subgraph #"
                    << subgraphIndex << " error: " << e.what();
        ARMNN_LOG(error) << errorString.str();
        std::stringstream errors;
        errors << errorString.str() << "\n";
        throw ParseException(errors.str());
    }

    // establish the connections from the layer outputs to the inputs of the subsequent layers
    for (subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
    {
        for (size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
        {
            // Only tensors that have a registered producer get connected.
            if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot != nullptr)
            {
                for (size_t inputSlotIdx = 0;
                    inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
                    ++inputSlotIdx)
                {
                    m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
                        *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
                }
            }
        }
    }

    return std::move(m_Network);
}
702
703void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
704 size_t tensorIndex,
705 armnn::IOutputSlot* slot)
706{
707 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100708 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
709 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100710
711 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
712
713 // assuming there is only one producer for that tensor
714 if (tensorSlots.outputSlot != nullptr)
715 {
James Ward58dec6b2020-09-11 17:32:44 +0100716 throw ParseException(fmt::format("Another layer has already registered itself as the producer of "
717 "subgraph:{} tensor:{} {}",
718 subgraphIndex,
719 tensorIndex,
720 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +0100721 }
722
723 tensorSlots.outputSlot = slot;
724}
725
726void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
727 size_t tensorIndex,
728 armnn::IInputSlot* slot)
729{
730 CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100731 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
732 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100733
734 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
735 tensorSlots.inputSlots.push_back(slot);
736}
737
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100738void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
739{
740 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
741
742 // NOTE: By default we presume the custom operator is not supported
743 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
744
745 // Identify custom code defined for custom operator
746 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
747 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
748
749 // Find parser function that correspondes to custom code (if any)
750 auto iterator = m_CustomParserFunctions.find(customCode);
751 if (iterator != m_CustomParserFunctions.end())
752 {
753 customParserFunction = iterator->second;
754 }
755
756 // Run parser function
757 (this->*customParserFunction)(subgraphIndex, operatorIndex);
758}
759
telsoa01c577f2c2018-08-31 09:22:23 +0100760void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
761{
762 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100763
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100764 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
765
766 auto opcodeIndex = operatorPtr->opcode_index;
767 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
768
769 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
770 {
771 // Do not add StandInLayer, throw ParseException instead
772 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +0100773 fmt::format("Operator not supported. "
774 "subgraph:{} operator:{} "
775 "opcode_index:{} opcode:{} / {} {}",
776 subgraphIndex,
777 operatorIndex,
778 opcodeIndex,
779 opcode,
780 tflite::EnumNameBuiltinOperator(opcode),
781 CHECK_LOCATION().AsString()));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100782 }
783
784 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
785 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
786
Matthew Sloyan589e3e82020-09-11 16:17:48 +0100787 const unsigned int numInputs = armnn::numeric_cast<unsigned int>(inputs.size());
788 const unsigned int numOutputs = armnn::numeric_cast<unsigned int>(outputs.size());
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100789
790 StandInDescriptor descriptor(numInputs, numOutputs);
James Ward58dec6b2020-09-11 17:32:44 +0100791 auto layerName = fmt::format("StandIn:{}:{}:{}", subgraphIndex, operatorIndex, opcode);
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100792
793 // Add a non-executable StandInLayer as a placeholder for any unsupported operator
794 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +0100795 ARMNN_ASSERT(layer != nullptr);
796
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100797 for (unsigned int i = 0u; i < numOutputs; ++i)
798 {
Sadik Armagand109a4d2020-07-28 10:42:13 +0100799 layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
Aron Virginas-Tarc975f922019-10-23 17:38:17 +0100800 }
801
802 auto inputTensorIds = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
803 auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
804
805 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
806 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
telsoa01c577f2c2018-08-31 09:22:23 +0100807}
808
telsoa01c577f2c2018-08-31 09:22:23 +0100809void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
810{
811 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
812
813 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
814 const auto * options = operatorPtr->builtin_options.AsConv2DOptions();
815
816 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
817
818 Convolution2dDescriptor desc;
819 desc.m_BiasEnabled = false;
820 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
821 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
jimfly01c25411c2018-11-14 17:47:22 +0000822 desc.m_DataLayout = armnn::DataLayout::NHWC;
Pablo Tellof0bd6832019-04-26 17:58:13 +0100823 desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
824 desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);
Kevin May83add212019-03-26 11:39:19 +0000825
telsoa01c577f2c2018-08-31 09:22:23 +0100826 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
827 CHECK_VALID_SIZE(inputs.size(), 2, 3);
828
829 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
830 CHECK_VALID_SIZE(outputs.size(), 1);
831
832 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
833 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
834
835 // assuming input is NHWC
836 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
837 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
838
839 // assuming the filter is OHWI : Output, H, W, Input
840 // which is essentially the same as NHWC
841 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
842 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
843
Pablo Tellof0bd6832019-04-26 17:58:13 +0100844 CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
845 desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
846 CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
847 desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
telsoa01c577f2c2018-08-31 09:22:23 +0100848
Matteo Martincigh747ef822018-12-18 09:26:39 +0000849 auto filterTensorAndData = CreateConstTensor(inputs[1],
850 filterTensorInfo,
851 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +0100852 armnn::IConnectableLayer* layer = nullptr;
telsoa01c577f2c2018-08-31 09:22:23 +0100853
James Ward58dec6b2020-09-11 17:32:44 +0100854 auto layerName = fmt::format("Conv2D:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +0100855
856 if (inputs.size() == 3)
857 {
858 desc.m_BiasEnabled = true;
859 armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +0000860 auto biasTensorAndData = CreateConstTensor(inputs[2],
861 biasTensorInfo,
862 armnn::Optional<armnn::PermutationVector&>());
telsoa01c577f2c2018-08-31 09:22:23 +0100863 layer = m_Network->AddConvolution2dLayer(desc,
864 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100865 Optional<ConstTensor>(biasTensorAndData.first),
telsoa01c577f2c2018-08-31 09:22:23 +0100866 layerName.c_str());
867 }
868 else
869 {
870 layer = m_Network->AddConvolution2dLayer(desc,
871 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +0100872 EmptyOptional(),
telsoa01c577f2c2018-08-31 09:22:23 +0100873 layerName.c_str());
874 }
875
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100876 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +0100877
Sadik Armagand109a4d2020-07-28 10:42:13 +0100878 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
jimfly01c25411c2018-11-14 17:47:22 +0000879 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100880
881 // register the input connection slots for the layer, connections are made after all layers have been created
882 // only the tensors for the inputs are relevant, exclude the const tensors
883 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +0000884 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
telsoa01c577f2c2018-08-31 09:22:23 +0100885
jimfly01c25411c2018-11-14 17:47:22 +0000886 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
telsoa01c577f2c2018-08-31 09:22:23 +0100887 // register the output connection slots for the layer, connections are made after all layers have been created
888 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
889 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
890}
891
// Converts a TfLite DEPTHWISE_CONV_2D operator into an ArmNN DepthwiseConvolution2d layer.
// TfLite stores the depthwise filter as [1, H, W, I * M]; ArmNN expects [M, I, H, W],
// so the filter tensor info is reshaped and a permutation applied when the constant is created.
void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();

    CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);

    DepthwiseConvolution2dDescriptor desc;
    desc.m_BiasEnabled = false;
    desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
    desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    // depth_multiplier is only range-checked here; the actual multiplier is
    // recovered below from the filter/input channel counts.
    CHECKED_NON_NEGATIVE(options->depth_multiplier);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 2, 3);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);
    desc.m_DilationX = CHECKED_NON_NEGATIVE(options->dilation_w_factor);
    desc.m_DilationY = CHECKED_NON_NEGATIVE(options->dilation_h_factor);

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1], permutationVector);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    // (the last dimension recovers the depth multiplier M = (I*M) / I)
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });

    // Derive explicit padding from the TfLite SAME/VALID scheme, dilation included.
    CalcPadding(inputHeight, filterHeight, desc.m_StrideY,
                desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, options->padding);
    CalcPadding(inputWidth, filterWidth, desc.m_StrideX,
                desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);

    auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
    armnn::IConnectableLayer* layer = nullptr;
    auto layerName = fmt::format("DepthwiseConv2D:{}:{}", subgraphIndex, operatorIndex);

    if (inputs.size() == 3)
    {
        desc.m_BiasEnabled = true;
        TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
        auto biasTensorAndData = CreateConstTensor(inputs[2],
                                                   biasTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          Optional<ConstTensor>(biasTensorAndData.first),
                                                          layerName.c_str());
    }
    else
    {
        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                          filterTensorAndData.first,
                                                          EmptyOptional(),
                                                          layerName.c_str());
    }
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    // NOTE: after this call 'layer' may point at the appended activation layer,
    // so the operator's output tensor is registered against that layer instead.
    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
    // register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
978
Finn Williamsed66d142019-12-06 09:55:55 +0000979void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
980{
981 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
982
983 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
984 CHECK_VALID_SIZE(inputs.size(), 1);
985
986 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
987 CHECK_VALID_SIZE(outputs.size(), 1);
988
James Ward58dec6b2020-09-11 17:32:44 +0100989 auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsed66d142019-12-06 09:55:55 +0000990
991 IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100992 ARMNN_ASSERT(layer != nullptr);
Finn Williamsed66d142019-12-06 09:55:55 +0000993
Sadik Armagand109a4d2020-07-28 10:42:13 +0100994 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Finn Williamsed66d142019-12-06 09:55:55 +0000995 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
996
997 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
998 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
999
1000 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1001 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1002}
1003
Derek Lambertif0176992020-04-28 13:37:49 +01001004void TfLiteParser::ParseExp(size_t subgraphIndex, size_t operatorIndex)
1005{
1006 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1007
1008 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1009 CHECK_VALID_SIZE(inputs.size(), 1);
1010
1011 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1012 CHECK_VALID_SIZE(outputs.size(), 1);
1013
James Ward58dec6b2020-09-11 17:32:44 +01001014 auto layerName = fmt::format("Exp:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01001015
1016 ElementwiseUnaryDescriptor desc;
1017 desc.m_Operation = UnaryOperation::Exp;
1018 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerName.c_str());
1019 ARMNN_ASSERT(layer != nullptr);
1020
Sadik Armagand109a4d2020-07-28 10:42:13 +01001021 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Derek Lambertif0176992020-04-28 13:37:49 +01001022 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1023
1024 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1025 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1026
1027 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1028 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1029}
1030
Keith Davis4cd29a02019-09-09 14:49:20 +01001031void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
1032{
1033 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1034
1035 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Kevin May85d92602019-09-27 17:21:06 +01001036 CHECK_VALID_SIZE(inputs.size(), 1, 2);
Keith Davis4cd29a02019-09-09 14:49:20 +01001037
1038 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1039 CHECK_VALID_SIZE(outputs.size(), 1);
1040
James Ward58dec6b2020-09-11 17:32:44 +01001041 auto layerName = fmt::format("Transpose:{}:{}", subgraphIndex, operatorIndex);
Mike Kelly08759e22020-03-02 11:41:31 +00001042 TransposeDescriptor desc;
Keith Davis4cd29a02019-09-09 14:49:20 +01001043
josh minorba424d22019-11-13 10:55:17 -06001044 if (inputs.size() == 2)
Kevin May85d92602019-09-27 17:21:06 +01001045 {
1046 armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
1047 BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
josh minorba424d22019-11-13 10:55:17 -06001048 auto numPermVecElements = permuteTensorInfo.GetNumElements();
1049 std::vector<unsigned int> permuteShape(numPermVecElements);
Kevin May85d92602019-09-27 17:21:06 +01001050 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
Mike Kelly08759e22020-03-02 11:41:31 +00001051 PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
Kevin May85d92602019-09-27 17:21:06 +01001052
Mike Kelly08759e22020-03-02 11:41:31 +00001053 desc = TransposeDescriptor(permutationVector);
Kevin May85d92602019-09-27 17:21:06 +01001054 }
1055
James Conroy05102392020-06-24 15:39:55 +01001056 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001057 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001058 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
Keith Davis4cd29a02019-09-09 14:49:20 +01001059
James Conroy05102392020-06-24 15:39:55 +01001060 IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001061 ARMNN_ASSERT(layer != nullptr);
Keith Davis4cd29a02019-09-09 14:49:20 +01001062 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1063
1064 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1065 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1066
1067 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1068 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1069}
1070
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001071void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
1072{
1073 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1074
1075 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1076 const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();
1077
1078 TransposeConvolution2dDescriptor desc;
1079 desc.m_BiasEnabled = false;
1080 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1081 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1082 desc.m_DataLayout = armnn::DataLayout::NHWC;
1083
1084 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001085 CHECK_VALID_SIZE(inputs.size(), 3);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001086
1087 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1088 CHECK_VALID_SIZE(outputs.size(), 1);
1089
Colm Donelan0ad3ef12020-07-03 15:54:28 +01001090 if (inputs[0])
1091 {
1092 armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
1093 std::vector<int> output_shape(tensorInfo.GetNumElements());
1094 if (tensorInfo.GetDataType() == DataType::Signed32)
1095 {
1096 ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
1097 }
1098 if (tensorInfo.GetDataType() == DataType::QAsymmU8)
1099 {
1100 for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
1101 {
1102 output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
1103 }
1104 }
1105 // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
1106 for (int dimension : output_shape)
1107 {
1108 desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
1109 }
1110 desc.m_OutputShapeEnabled = true;
1111 }
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001112 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[2]);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001113 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
1114
1115 // TfLite uses NHWC tensors
1116 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1117 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1118
1119 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
1120 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
1121
1122 CalcPadding(inputHeight,
1123 filterHeight,
1124 desc.m_StrideY,
1125 1, // DilationY
1126 desc.m_PadTop,
1127 desc.m_PadBottom,
1128 options->padding);
1129
1130 CalcPadding(inputWidth,
1131 filterWidth,
1132 desc.m_StrideX,
1133 1, // DilationX
1134 desc.m_PadLeft,
1135 desc.m_PadRight,
1136 options->padding);
1137
1138 auto filterTensorAndData = CreateConstTensor(inputs[1],
1139 filterTensorInfo,
1140 armnn::Optional<armnn::PermutationVector&>());
1141
1142 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01001143 auto layerName = fmt::format("TransposeConv:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001144
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001145 layer = m_Network->AddTransposeConvolution2dLayer(desc,
1146 filterTensorAndData.first,
1147 EmptyOptional(),
1148 layerName.c_str());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001149
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001150 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001151
Sadik Armagand109a4d2020-07-28 10:42:13 +01001152 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001153 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1154
1155 // only the tensors for the inputs are relevant, exclude the const (filter) tensor
1156 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Matthew Jacksonccb25ea2019-08-20 17:18:33 +01001157 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
Matthew Jackson74bf7da2019-08-16 16:51:42 +01001158
1159 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1160 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1161}
1162
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001163void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
1164{
1165 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
1166}
1167
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001168void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
1169{
1170 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1171
1172 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1173 CHECK_VALID_SIZE(inputs.size(), 3);
1174
1175 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1176 CHECK_VALID_SIZE(outputs.size(), 1);
1177
1178 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1179 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1180
1181 armnn::TensorInfo cropsTensorInfo = ToTensorInfo(inputs[2]);
1182 BufferRawPtr cropsBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1183
1184 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1185 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1186
1187 std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
1188 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.GetNumBytes());
1189
1190 size_t step = 2;
1191 std::vector<std::pair<unsigned int, unsigned int>> crops;
1192 for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
1193 {
1194 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1195 }
1196
1197 armnn::BatchToSpaceNdDescriptor desc;
1198 desc.m_BlockShape = blockShape;
1199 desc.m_Crops = crops;
1200 desc.m_DataLayout = armnn::DataLayout::NHWC;
1201
James Ward58dec6b2020-09-11 17:32:44 +01001202 auto layerName = fmt::format("BatchToSpaceND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001203
James Conroy05102392020-06-24 15:39:55 +01001204 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001205 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001206 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1207
1208 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1209 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesdb947e22019-02-08 18:52:21 -02001210 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1211
1212 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1213 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1214
1215 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1216 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1217}
1218
Matthew Jackson28c94572019-07-18 10:47:03 +01001219void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
1220{
1221 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1222
1223 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1224 CHECK_VALID_SIZE(inputs.size(), 1);
1225
1226 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1227 CHECK_VALID_SIZE(outputs.size(), 1);
1228
1229 L2NormalizationDescriptor desc;
1230 desc.m_DataLayout = armnn::DataLayout::NHWC;
James Ward58dec6b2020-09-11 17:32:44 +01001231 auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
Matthew Jackson28c94572019-07-18 10:47:03 +01001232 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1233
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001234 ARMNN_ASSERT(layer != nullptr);
Matthew Jackson28c94572019-07-18 10:47:03 +01001235
Sadik Armagand109a4d2020-07-28 10:42:13 +01001236 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jackson28c94572019-07-18 10:47:03 +01001237 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1238
1239 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1240 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1241
1242 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1243 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1244}
1245
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001246void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
1247{
1248 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
1249}
1250
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001251void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
1252{
1253 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1254
1255 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1256 CHECK_VALID_SIZE(inputs.size(), 2);
1257
1258 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1259 CHECK_VALID_SIZE(outputs.size(), 1);
1260
James Ward58dec6b2020-09-11 17:32:44 +01001261 auto layerName = fmt::format("Maximum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001262
1263 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1264 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1265 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001266
Sadik Armagand109a4d2020-07-28 10:42:13 +01001267 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001268 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1269
1270 IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
1271 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001272 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1273
1274 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001275 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesb8d805e2019-02-12 22:57:13 -02001276
1277 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1278 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1279}
1280
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001281void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
1282{
1283 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1284
1285 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1286 CHECK_VALID_SIZE(inputs.size(), 2);
1287
1288 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1289 CHECK_VALID_SIZE(outputs.size(), 1);
1290
James Ward58dec6b2020-09-11 17:32:44 +01001291 auto layerName = fmt::format("Minimum:{}:{}", subgraphIndex, operatorIndex);
James Conroy05102392020-06-24 15:39:55 +01001292
1293 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1294 TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1295 CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001296
Sadik Armagand109a4d2020-07-28 10:42:13 +01001297 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001298 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1299
1300 IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
1301 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001302 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1303
1304 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001305 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalves8f6d7a72019-02-12 22:58:18 -02001306
1307 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1308 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1309}
1310
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001311void TfLiteParser::ParsePool(size_t subgraphIndex,
1312 size_t operatorIndex,
1313 PoolingAlgorithm algorithm)
1314{
1315 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1316
1317 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1318 const auto * options = operatorPtr->builtin_options.AsPool2DOptions();
1319
1320 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
1321
1322 std::string layerName;
1323
1324 switch (algorithm)
1325 {
1326 case PoolingAlgorithm::Average:
1327 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001328 fmt::format("AveragePool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001329 break;
1330 case PoolingAlgorithm::Max:
1331 layerName =
James Ward58dec6b2020-09-11 17:32:44 +01001332 fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001333 break;
1334 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001335 ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001336 }
1337
1338 Pooling2dDescriptor desc;
1339
1340 desc.m_PoolType = algorithm;
1341 desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
1342 desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
1343 desc.m_PoolWidth = CHECKED_NON_NEGATIVE(options->filter_width);
1344 desc.m_PoolHeight = CHECKED_NON_NEGATIVE(options->filter_height);
1345 desc.m_PaddingMethod = PaddingMethod::Exclude;
1346 desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
jimfly01c25411c2018-11-14 17:47:22 +00001347 desc.m_DataLayout = armnn::DataLayout::NHWC;
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001348
1349 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1350 CHECK_VALID_SIZE(inputs.size(), 1);
1351 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1352
1353 // assuming input is NHWC
1354 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1355 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1356
Pablo Tellof0bd6832019-04-26 17:58:13 +01001357 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, 1u,
1358 desc.m_PadTop, desc.m_PadBottom, options->padding);
1359 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, 1u,
1360 desc.m_PadLeft, desc.m_PadRight, options->padding);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001361
1362 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1363 CHECK_VALID_SIZE(outputs.size(), 1);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001364
Sadik Armagand109a4d2020-07-28 10:42:13 +01001365 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001366 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1367
1368 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1369 ARMNN_ASSERT(layer != nullptr);
jimfly01c25411c2018-11-14 17:47:22 +00001370 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001371
1372 // register the input connection slots for the layer, connections are made after all layers have been created
1373 // only the tensors for the inputs are relevant, exclude the const tensors
1374 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
jimfly01c25411c2018-11-14 17:47:22 +00001375 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001376
jimfly01c25411c2018-11-14 17:47:22 +00001377 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Nattapat Chaimanowongb66504b2018-10-17 15:19:14 +01001378 // register the output connection slots for the layer, connections are made after all layers have been created
1379 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1380 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1381}
1382
josh minorba424d22019-11-13 10:55:17 -06001383void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
1384{
1385 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1386
1387 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1388 CHECK_VALID_SIZE(inputs.size(), 3);
1389 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1390 CHECK_VALID_SIZE(outputs.size(), 1);
1391
1392 SliceDescriptor desc;
1393
1394 // set begin tensor info for slice descriptor
1395 armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
1396 BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1397
1398 std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
1399 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
1400
1401 // set size tensor info for slice descriptor
1402 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
1403 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1404
1405 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1406 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1407 desc = SliceDescriptor(begin, size);
1408
James Ward58dec6b2020-09-11 17:32:44 +01001409 auto layerName = fmt::format("Slice:{}:{}", subgraphIndex, operatorIndex);
josh minorba424d22019-11-13 10:55:17 -06001410
James Conroy05102392020-06-24 15:39:55 +01001411 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001412 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001413 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1414
1415 IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
josh minorba424d22019-11-13 10:55:17 -06001416 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1417
1418 // register the input connection slots for the layer, connections are made after all layers have been created
1419 // only the tensors for the inputs are relevant, exclude the const tensors
1420 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1421 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1422
1423 // register the output connection slots for the layer, connections are made after all layers have been created
1424 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1425 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1426}
1427
telsoa01c577f2c2018-08-31 09:22:23 +01001428void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
1429{
1430 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1431 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1432 const auto * options = operatorPtr->builtin_options.AsSoftmaxOptions();
1433
1434 SoftmaxDescriptor desc;
1435 desc.m_Beta = options->beta;
1436
1437 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1438 CHECK_VALID_SIZE(inputs.size(), 1);
1439 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1440 CHECK_VALID_SIZE(outputs.size(), 1);
1441
James Ward58dec6b2020-09-11 17:32:44 +01001442 auto layerName = fmt::format("Softmax:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001443 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1444
Sadik Armagand109a4d2020-07-28 10:42:13 +01001445 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
telsoa01c577f2c2018-08-31 09:22:23 +01001446 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1447
1448 // register the input connection slots for the layer, connections are made after all layers have been created
1449 // only the tensors for the inputs are relevant, exclude the const tensors
1450 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1451 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1452
1453 // register the output connection slots for the layer, connections are made after all layers have been created
1454 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1455 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1456}
1457
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001458void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
1459{
1460 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1461
1462 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1463 CHECK_VALID_SIZE(inputs.size(), 3);
1464
1465 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1466 CHECK_VALID_SIZE(outputs.size(), 1);
1467
1468 armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
1469 BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1470
1471 armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
1472 BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
1473
1474 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1475 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1476
1477 std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
1478 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
1479
1480 size_t step = 2;
1481 std::vector<std::pair<unsigned int, unsigned int>> padList;
1482 for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
1483 {
1484 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1485 }
1486
1487 armnn::SpaceToBatchNdDescriptor desc;
1488 desc.m_BlockShape = blockShape;
1489 desc.m_PadList = padList;
1490 desc.m_DataLayout = armnn::DataLayout::NHWC;
1491
James Ward58dec6b2020-09-11 17:32:44 +01001492 auto layerName = fmt::format("SpaceToBatchND:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001493
James Conroy05102392020-06-24 15:39:55 +01001494 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001495 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001496 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
1497
1498 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1499 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbaded142019-02-08 19:02:48 -02001500 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1501
1502 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1503 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1504
1505 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1506 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1507}
1508
telsoa01c577f2c2018-08-31 09:22:23 +01001509armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
1510 const armnn::TensorInfo & inputTensorInfo)
1511{
1512 CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1513 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1514 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1515
1516 if (inputTensorInfo.GetNumDimensions() > 4)
1517 {
1518 std::stringstream ss;
1519 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1520 << " shape:" << inputTensorInfo.GetShape() << " "
1521 << CHECK_LOCATION().AsString();
1522 throw ParseException(ss.str());
1523 }
1524
1525 if (squeezeDims.empty())
1526 {
1527 squeezeDims.assign(dimensionSequence,
1528 dimensionSequence+inputTensorInfo.GetNumDimensions());
1529 }
1530
1531 std::vector<uint32_t> outputDims;
1532 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1533 {
1534 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1535 auto currentDimension = inputTensorInfo.GetShape()[i];
1536 if (skipSqueeze || currentDimension != 1)
1537 {
1538 outputDims.push_back(currentDimension);
1539 }
1540 }
1541
1542 if (outputDims.size() > 4)
1543 {
1544 std::stringstream ss;
1545 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1546 << " shape:" << inputTensorInfo.GetShape() << " "
1547 << CHECK_LOCATION().AsString();
1548 throw ParseException(ss.str());
1549 }
1550
1551 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1552 outputDims.data());
1553
1554 // we need to preserve the tensor type and the quantization data as well
1555 TensorInfo outTensorInfo = inputTensorInfo;
1556 outTensorInfo.SetShape(outShape);
1557
1558 return outTensorInfo;
1559}
1560
1561void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
1562{
1563 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1564
1565 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1566 CHECK_VALID_SIZE(inputs.size(), 1);
1567
1568 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1569 CHECK_VALID_SIZE(outputs.size(), 1);
1570
1571 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1572 const auto * options = operatorPtr->builtin_options.AsSqueezeOptions();
James Ward58dec6b2020-09-11 17:32:44 +01001573 auto layerName = fmt::format("Squeeze:{}:{}", subgraphIndex, operatorIndex);
telsoa01c577f2c2018-08-31 09:22:23 +01001574
1575 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1576 armnn::TensorInfo outputTensorInfo =
1577 TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
1578 inputTensorInfo);
James Conroy05102392020-06-24 15:39:55 +01001579 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
telsoa01c577f2c2018-08-31 09:22:23 +01001580
1581 ReshapeDescriptor reshapeDesc;
1582 reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
1583
telsoa01c577f2c2018-08-31 09:22:23 +01001584 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001585 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001586 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1587
1588 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1589 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1590
1591 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1592 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1593}
1594
/// Converts a TfLite STRIDED_SLICE operator into an armnn StridedSlice layer.
/// Inputs: [0] data tensor, [1] begin, [2] end, [3] strides; the begin/end/stride
/// values are read from the constant buffers backing inputs 1-3.
void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(inputs.size(), 4);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsStridedSliceOptions();

    // Copy the TfLite bit-masks straight into the armnn descriptor.
    StridedSliceDescriptor desc;
    desc.m_BeginMask = options->begin_mask;
    desc.m_EllipsisMask = options->ellipsis_mask;
    desc.m_EndMask = options->end_mask;
    desc.m_NewAxisMask = options->new_axis_mask;
    desc.m_ShrinkAxisMask = options->shrink_axis_mask;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Read the begin indices out of the constant buffer backing input 1.
    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

    std::vector<int> begin(beginTensorInfo.GetNumElements());
    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());

    // Read the end indices out of the constant buffer backing input 2.
    armnn::TensorInfo endTensorInfo = ToTensorInfo(inputs[2]);
    BufferRawPtr endBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);

    std::vector<int> end(endTensorInfo.GetNumElements());
    ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());

    // Read the per-dimension strides out of the constant buffer backing input 3.
    armnn::TensorInfo strideTensorInfo = ToTensorInfo(inputs[3]);
    BufferRawPtr strideBufferPtr = GetBuffer(m_Model, inputs[3]->buffer);

    std::vector<int> stride(strideTensorInfo.GetNumElements());
    ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());

    desc.m_Begin = begin;
    desc.m_End = end;
    desc.m_Stride = stride;

    auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);

    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Register the data input only; the constant begin/end/stride tensors are
    // excluded. Connections are made after all layers have been created.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
1651
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001652void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
1653{
1654 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1655
1656 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1657 const auto * options = operatorPtr->builtin_options.AsSubOptions();
1658
1659 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1660 CHECK_VALID_SIZE(inputs.size(), 2);
1661
1662 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1663 CHECK_VALID_SIZE(outputs.size(), 1);
1664
1665 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1666 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1667
James Ward58dec6b2020-09-11 17:32:44 +01001668 auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001669 IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001670 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001671
Sadik Armagand109a4d2020-07-28 10:42:13 +01001672 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001673 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1674
1675 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001676 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesbbeae262019-02-07 18:37:39 -02001677
1678 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1679
1680 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1681 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1682}
1683
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301684void TfLiteParser::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
1685{
1686 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1687
1688 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1689 const auto * options = operatorPtr->builtin_options.AsDivOptions();
1690
1691 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1692 CHECK_VALID_SIZE(inputs.size(), 2);
1693
1694 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1695 CHECK_VALID_SIZE(outputs.size(), 1);
1696
1697 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1698 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1699
James Ward58dec6b2020-09-11 17:32:44 +01001700 auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301701 IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001702 ARMNN_ASSERT(layer != nullptr);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301703
Sadik Armagand109a4d2020-07-28 10:42:13 +01001704 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301705 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1706
1707 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001708 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Darshan Patel42b3d7d2020-05-25 22:30:07 +05301709 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1710
1711 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1712 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1713}
1714
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001715void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
1716{
1717 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1718
1719 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1720 const auto * options = operatorPtr->builtin_options.AsAddOptions();
1721
1722 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1723 CHECK_VALID_SIZE(inputs.size(), 2);
1724
1725 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1726 CHECK_VALID_SIZE(outputs.size(), 1);
1727
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001728 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1729 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1730
James Ward58dec6b2020-09-11 17:32:44 +01001731 auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001732 IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001733 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001734
Sadik Armagand109a4d2020-07-28 10:42:13 +01001735 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001736 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1737
1738 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001739 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesd4ac6a42018-12-18 12:56:22 -02001740 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1741
1742 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1743 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1744}
1745
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001746void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
1747{
1748 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1749
1750 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1751 const auto * options = operatorPtr->builtin_options.AsMulOptions();
1752
1753 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1754 CHECK_VALID_SIZE(inputs.size(), 2);
1755
1756 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1757 CHECK_VALID_SIZE(outputs.size(), 1);
1758
Bruno Goncalves9c761a62018-12-27 14:20:35 -02001759 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
1760 armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
1761
James Ward58dec6b2020-09-11 17:32:44 +01001762 auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001763 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001764 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001765
Sadik Armagand109a4d2020-07-28 10:42:13 +01001766 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001767 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1768
1769 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat16f82f92020-09-14 16:12:44 +01001770 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
Bruno Goncalvesf803f782018-12-18 13:40:30 -02001771 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
1772
1773 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1774 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1775}
1776
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001777void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
1778{
1779 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1780
1781 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1782
1783 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1784 CHECK_VALID_SIZE(outputs.size(), 1);
1785
1786 armnn::TensorInfo dimTensorInfo = ToTensorInfo(inputs[1]);
1787 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1788
1789 armnn::MeanDescriptor desc;
1790 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1791 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1792 desc.m_Axis = axis;
1793
1794 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001795 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001796
1797 desc.m_KeepDims =
1798 inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
1799 true : false;
1800
James Ward58dec6b2020-09-11 17:32:44 +01001801 auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001802 IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01001803 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves2235cee2018-12-19 12:51:45 -02001804
1805 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1806
1807 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1808 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1809
1810 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1811 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1812}
1813
Darshan Patel83fcf982020-05-26 22:22:42 +05301814void TfLiteParser::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
1815{
1816 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1817
1818 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1819 CHECK_VALID_SIZE(inputs.size(), 1);
1820
1821 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1822 CHECK_VALID_SIZE(outputs.size(), 1);
1823
James Ward58dec6b2020-09-11 17:32:44 +01001824 auto layerName = fmt::format("Neg:{}:{}", subgraphIndex, operatorIndex);
Darshan Patel83fcf982020-05-26 22:22:42 +05301825 armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Neg);
1826 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
1827 ARMNN_ASSERT(layer != nullptr);
1828
Sadik Armagand109a4d2020-07-28 10:42:13 +01001829 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Darshan Patel83fcf982020-05-26 22:22:42 +05301830 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1831
1832 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1833 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1834
1835 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1836 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1837}
1838
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001839void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
1840{
1841 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1842
1843 TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1844
1845 TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1846 CHECK_VALID_SIZE(outputs.size(), 1);
1847
1848 armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
1849 BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
1850
1851 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1852 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
1853
1854 size_t step = 2;
1855 armnn::PadDescriptor desc;
1856 for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1857 {
1858 desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1859 }
1860
James Ward58dec6b2020-09-11 17:32:44 +01001861 auto layerName = fmt::format("Pad:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01001862 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01001863
1864 IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
1865 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves6c2355b2018-12-19 12:52:01 -02001866 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1867
1868 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1869 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1870
1871 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1872 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1873}
1874
Sadik Armagan66dedc72019-12-10 16:32:07 +00001875void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
1876{
1877 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1878
1879 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1880 CHECK_VALID_SIZE(inputs.size(), 1);
1881
1882 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1883 CHECK_VALID_SIZE(outputs.size(), 1);
1884
James Ward58dec6b2020-09-11 17:32:44 +01001885 auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001886
1887 IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001888 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001889
Sadik Armagand109a4d2020-07-28 10:42:13 +01001890 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan66dedc72019-12-10 16:32:07 +00001891 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1892
1893 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1894 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1895
1896 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1897 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1898}
Finn Williamsc42c3842019-01-22 14:18:11 +00001899
// Handles the TfLite RELU operator by delegating to the shared activation parser.
void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
}
1904
// Handles the TfLite RELU6 operator by delegating to the shared activation parser;
// the clamp bounds (m_A = 6, m_B = 0) are set inside ParseActivation.
void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
}
Sadik Armagan58f39192018-09-17 14:14:39 +01001909
// Handles the TfLite LEAKY_RELU operator by delegating to the shared activation
// parser; the alpha (negative slope) option is read inside ParseActivation.
void TfLiteParser::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
}
1914
// Handles the TfLite LOGISTIC (sigmoid) operator by delegating to the shared activation parser.
void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
}
1919
// Handles the TfLite TANH operator by delegating to the shared activation parser;
// the scale parameters (m_A = m_B = 1) are set inside ParseActivation.
void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
}
1924
// Handles the TfLite HARD_SWISH operator by delegating to the shared activation parser.
void TfLiteParser::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
{
    ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
}
Finn Williamsc42c3842019-01-22 14:18:11 +00001929
1930void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
1931{
1932 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Sadik Armagan58f39192018-09-17 14:14:39 +01001933 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Jan Eilers8eb25602020-03-09 12:13:48 +00001934 IgnoreUnused(operatorPtr);
Sadik Armagan58f39192018-09-17 14:14:39 +01001935
1936 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
1937 CHECK_VALID_SIZE(inputs.size(), 1);
1938
1939 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
1940 CHECK_VALID_SIZE(outputs.size(), 1);
1941
James Ward58dec6b2020-09-11 17:32:44 +01001942 auto layerName = fmt::format("Activation:");
Sadik Armagan58f39192018-09-17 14:14:39 +01001943 ActivationDescriptor activationDesc;
Finn Williamsc42c3842019-01-22 14:18:11 +00001944 activationDesc.m_Function = activationType;
1945
1946 switch (activationType)
1947 {
1948 case ActivationFunction::ReLu:
1949 {
James Ward58dec6b2020-09-11 17:32:44 +01001950 layerName += fmt::format("RELU:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00001951 break;
1952 }
1953 case ActivationFunction::BoundedReLu:
1954 {
James Ward58dec6b2020-09-11 17:32:44 +01001955 layerName += fmt::format("RELU6:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00001956 activationDesc.m_A = 6.0f;
1957 activationDesc.m_B = 0.0f;
1958 break;
1959 }
1960 case ActivationFunction::Sigmoid:
1961 {
James Ward58dec6b2020-09-11 17:32:44 +01001962 layerName += fmt::format("SIGMOID:{}:{}", subgraphIndex, operatorIndex);
Finn Williamsc42c3842019-01-22 14:18:11 +00001963 break;
1964 }
Nina Drozd99851762019-04-09 09:37:38 +01001965 case ActivationFunction::TanH:
1966 {
James Ward58dec6b2020-09-11 17:32:44 +01001967 layerName += fmt::format("TANH:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd99851762019-04-09 09:37:38 +01001968 activationDesc.m_A = 1.0f;
1969 activationDesc.m_B = 1.0f;
1970 break;
1971 }
Sadik Armagan12239e72020-05-27 11:06:17 +01001972 case ActivationFunction::LeakyReLu:
1973 {
James Ward58dec6b2020-09-11 17:32:44 +01001974 layerName += fmt::format("LEAKYRELU:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan12239e72020-05-27 11:06:17 +01001975 const auto * options = operatorPtr->builtin_options.AsLeakyReluOptions();
1976 activationDesc.m_A = options->alpha;
1977 break;
1978 }
Jan Eilers2f746b32020-07-28 14:00:06 +01001979 case ActivationFunction::HardSwish:
James Ward58dec6b2020-09-11 17:32:44 +01001980 layerName += fmt::format("HARDSWISH:{}:{}", subgraphIndex, operatorIndex);
Jan Eilers2f746b32020-07-28 14:00:06 +01001981 break;
Finn Williamsc42c3842019-01-22 14:18:11 +00001982 default:
1983 {
1984 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001985 fmt::format("Unexpected ActivationFunction[{}] when creating layerName {} ",
1986 static_cast<int>(activationType), CHECK_LOCATION().AsString()));
Finn Williamsc42c3842019-01-22 14:18:11 +00001987 }
1988 }
1989
1990 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
Sadik Armagan58f39192018-09-17 14:14:39 +01001991
Sadik Armagand109a4d2020-07-28 10:42:13 +01001992 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan58f39192018-09-17 14:14:39 +01001993 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1994
1995 // register the input connection slots for the layer, connections are made after all layers have been created
1996 // only the tensors for the inputs are relevant, exclude the const tensors
1997 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1998 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1999
2000 // register the output connection slots for the layer, connections are made after all layers have been created
2001 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2002 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2003}
Sadikb94967b2018-09-19 15:30:00 +01002004armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
2005 const std::vector<int32_t> & targetDimsIn)
2006{
2007 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2008 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2009
2010 if (stretchDim != targetDimsIn.end())
2011 {
2012 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2013 {
2014 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002015 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
Sadikb94967b2018-09-19 15:30:00 +01002016 }
2017
2018 auto targetNumElements =
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002019 armnn::numeric_cast<unsigned int>(
Sadikb94967b2018-09-19 15:30:00 +01002020 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2021
2022 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2023 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2024 }
2025
2026 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2027
2028 TensorInfo reshapeInfo = inputTensorInfo;
2029 reshapeInfo.SetShape(outputShape);
2030
2031 return reshapeInfo;
2032}
2033
// Converts a TfLite RESHAPE operator into an ArmNN Reshape layer.
// The target shape may come either from the operator's builtin options
// (new_shape) or from a constant second input tensor; at least one of the
// two must be present or a ParseException is thrown.
void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);

    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 1);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
    auto layerName = fmt::format("Reshape:{}:{}", subgraphIndex, operatorIndex);

    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
    armnn::TensorInfo actualOutputTensorInfo = ToTensorInfo(outputs[0]);
    // Reshape must not change quantization between input and output.
    CheckMatchingQuantization(inputTensorInfo, actualOutputTensorInfo, layerName, "Input 0", "Output 0");

    // Extracting new shape for the output
    // There are two ways it can be passed
    //  * First is to define the target shape in the operator built-in options
    //  * Second is to pass it as a second input tensor
    std::vector<int32_t> targetShape;
    bool targetShapeFound = false;
    // Check if built-in options were given
    if (options != nullptr)
    {
        // make sure the parameter is given
        if (options->new_shape.empty() == false)
        {
            targetShape = options->new_shape;
            targetShapeFound = true;
        }
    }

    // If there is no built-in option given or if the built-in new_shape parameter was empty
    if (!targetShapeFound)
    {
        // Check for a second input tensor
        if (inputs.size() > 1 && inputs[1] != nullptr)
        {
            // The shape input must be a constant (non-variable) 1D int32 tensor.
            if (inputs[1]->is_variable)
            {
                ARMNN_THROW_PARSE_EXCEPTION( "Target shapes defined in non-const input tensors is not supported");
            }

            if (inputs[1]->shape.size() != 1)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not a 1D tensor");
            }

            if (inputs[1]->type != tflite::TensorType_INT32)
            {
                ARMNN_THROW_PARSE_EXCEPTION("Target 'shape' input is not an int32 type");
            }

            // Extract target shape from input
            auto bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
            auto values = reinterpret_cast<const int32_t*>(bufferPtr->data.data());
            for (int i=0; i < inputs[1]->shape[0]; ++i)
            {
                targetShape.push_back(values[i]);
            }
        }
        else
        {
            ARMNN_THROW_PARSE_EXCEPTION("Target shape not defined in reshape parameters or input tensor. "
                                        "At least one method required");
        }
    }

    // Resolve any -1 wildcard in the target shape against the input element count.
    armnn::TensorInfo reshapeOutputTensorInfo =
        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, targetShape);

    // Check for valid input size and that reshape parameters equal output shape
    const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
    if (inputs.size() > 1 && !CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
    {
        std::stringstream ss;
        ss << "New shape defined in reshape parameters "
           << reshapeOutputTensorShape
           << " does not equal output shape "
           << actualOutputTensorInfo.GetShape()
           << ": "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapeOutputTensorInfo.GetShape();

    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    // Note: the computed shape (not the model's declared output shape) is used here.
    layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);

    // Register slots; actual connections are made after all layers have been created.
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});

    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
2134
// Handles the TfLite RESIZE_BILINEAR operator by delegating to the shared resize parser.
void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
}
2139
// Handles the TfLite RESIZE_NEAREST_NEIGHBOR operator by delegating to the shared resize parser.
void TfLiteParser::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
{
    ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
}
2144
2145void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
2146{
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002147 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2148
2149 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2150 CHECK_VALID_SIZE(inputs.size(), 2);
2151
2152 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2153 CHECK_VALID_SIZE(outputs.size(), 1);
2154
2155 armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[1]);
2156
2157 // Data for the parsed tensor args (size) must be stored locally.
2158 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2159
2160 BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2161 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2162
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002163 ResizeDescriptor desc;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002164 desc.m_Method = resizeMethod;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002165 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002166 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2167 desc.m_DataLayout = armnn::DataLayout::NHWC;
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002168
James Ward58dec6b2020-09-11 17:32:44 +01002169 auto layerName = fmt::format("Resize:");
Sadik Armagana3b31f02019-12-05 09:08:53 +00002170
2171 switch (resizeMethod)
2172 {
2173 case ResizeMethod::Bilinear:
2174 {
James Ward58dec6b2020-09-11 17:32:44 +01002175 layerName += fmt::format("BILINEAR:{}:{}", subgraphIndex, operatorIndex);
Sang-Hoon Park820eb142020-01-08 10:25:24 +00002176
2177 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2178 const auto * options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2179
David Monahan4a0c9b92020-05-30 09:48:39 +01002180 desc.m_AlignCorners = options->align_corners;
Sadik Armagana3b31f02019-12-05 09:08:53 +00002181 break;
2182 }
2183 case ResizeMethod::NearestNeighbor:
2184 {
James Ward58dec6b2020-09-11 17:32:44 +01002185 layerName += fmt::format("NEARESTNEIGHBOR:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagana3b31f02019-12-05 09:08:53 +00002186 break;
2187 }
2188 default:
2189 {
2190 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002191 fmt::format("Unexpected ResizeMethod[{}] when creating layerName {} ",
2192 static_cast<int>(resizeMethod), CHECK_LOCATION().AsString()));
Sadik Armagana3b31f02019-12-05 09:08:53 +00002193 }
2194 }
2195
James Conroy05102392020-06-24 15:39:55 +01002196 TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002197 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002198 CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
2199
2200 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
2201 ARMNN_ASSERT(layer != nullptr);
Bruno Goncalves3f58ddb2019-02-07 18:40:11 -02002202 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2203
2204 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2205 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2206
2207 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2208 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2209}
2210
Sadik Armagan479045b2018-10-01 11:51:37 +01002211void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
2212{
2213 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2214
2215 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2216 const auto * options = operatorPtr->builtin_options.AsConcatenationOptions();
2217
2218 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2219
2220 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2221 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2222 CHECK_VALID_SIZE(outputs.size(), 1);
2223
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002224 unsigned int numConcatView = static_cast<unsigned int>(inputs.size());
2225 uint32_t inputRank = ToTensorInfo(inputs[0]).GetNumDimensions();
Sadik Armagan479045b2018-10-01 11:51:37 +01002226
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002227 const unsigned int concatDimInput = static_cast<unsigned int>(
2228 (static_cast<int>(inputRank) + options->axis) % static_cast<int>(inputRank));
Sadik Armagan479045b2018-10-01 11:51:37 +01002229
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002230 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2231 concatDescriptor.SetConcatAxis(concatDimInput);
Sadik Armagan479045b2018-10-01 11:51:37 +01002232
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002233 unsigned int mergeDimOrigin = 0;
Sadik Armagan479045b2018-10-01 11:51:37 +01002234
2235 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2236 {
2237 TensorInfo inputTensorInfo = ToTensorInfo(inputs[viewIndex]);
2238
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002239 // This set up concatDescriptor view origin
2240 armnnUtils::ProcessConcatInputTensorInfo(
2241 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
Sadik Armagan479045b2018-10-01 11:51:37 +01002242 }
2243
James Ward58dec6b2020-09-11 17:32:44 +01002244 auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagand109a4d2020-07-28 10:42:13 +01002245 TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
James Conroy05102392020-06-24 15:39:55 +01002246
Jim Flynn906f9462019-05-10 13:55:21 +01002247 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002248 ARMNN_ASSERT(layer != nullptr);
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002249 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Sadik Armagan479045b2018-10-01 11:51:37 +01002250
James Conroy05102392020-06-24 15:39:55 +01002251 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002252 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
Sadik Armagan479045b2018-10-01 11:51:37 +01002253
Nattapat Chaimanowong5e9d2982019-01-25 13:20:39 +00002254 // add fused activation layer
2255 layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
Sadik Armagan479045b2018-10-01 11:51:37 +01002256
Sadik Armagan479045b2018-10-01 11:51:37 +01002257 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2258 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2259}
2260
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002261void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
2262{
2263 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2264
2265 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2266 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2267
2268 CHECK_SUPPORTED_FUSED_ACTIVATION(options, subgraphIndex, operatorIndex);
2269
2270 FullyConnectedDescriptor desc;
2271 desc.m_BiasEnabled = false;
Nattapat Chaimanowongd8eee592018-10-26 10:24:14 +01002272 desc.m_TransposeWeightMatrix = true;
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002273
2274 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2275 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2276 CHECK_VALID_SIZE(outputs.size(), 1);
2277
2278 armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
2279
2280 // Fully Connected Layer accepts two dimensional weights input
2281 int32_t weightsDimension = static_cast<int32_t>(filterTensorInfo.GetNumDimensions());
2282 if (weightsDimension != 2)
2283 {
2284 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002285 fmt::format("Dimension {} for Fully Connected weights is not supported by Armnn. "
2286 "Node {}",
2287 weightsDimension,
2288 CHECK_LOCATION().AsString()));
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002289 }
2290
Matteo Martincigh747ef822018-12-18 09:26:39 +00002291 auto filterTensorAndData = CreateConstTensor(inputs[1],
2292 filterTensorInfo,
2293 armnn::Optional<armnn::PermutationVector&>());
Matthew Jackson74bf7da2019-08-16 16:51:42 +01002294 armnn::IConnectableLayer* layer = nullptr;
James Ward58dec6b2020-09-11 17:32:44 +01002295 auto layerName = fmt::format("FullyConnected:{}:{}", subgraphIndex, operatorIndex);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002296
2297 if (inputs.size() == 3)
2298 {
2299 desc.m_BiasEnabled = true;
2300 TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
Matteo Martincigh747ef822018-12-18 09:26:39 +00002301 auto biasTensorAndData = CreateConstTensor(inputs[2],
2302 biasTensorInfo,
2303 armnn::Optional<armnn::PermutationVector&>());
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002304 layer = m_Network->AddFullyConnectedLayer(desc,
2305 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002306 Optional<ConstTensor>(biasTensorAndData.first),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002307 layerName.c_str());
2308 }
2309 else
2310 {
2311 layer = m_Network->AddFullyConnectedLayer(desc,
2312 filterTensorAndData.first,
Matteo Martincighfc598e12019-05-14 10:36:13 +01002313 EmptyOptional(),
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002314 layerName.c_str());
2315 }
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002316 ARMNN_ASSERT(layer != nullptr);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002317
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002318 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2319
2320 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2321
2322 if (inputTensorInfo.GetNumDimensions() > 2)
2323 {
2324 // Add reshape to flatten to 2D [batch_size, input_size],
2325 // where "input_size" corresponds to the number of inputs to the layer,
2326 // matching the second dimension of weights,
2327 // and "batch_size" is calculated by dividing the number of elements by "input_size".
2328 std::vector<unsigned int> reshapedDimensions(2);
2329 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2330 reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
2331
2332 if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
2333 {
2334 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002335 fmt::format("Failed to deduce input tensor shape from filter size {} {}",
2336 reshapedDimensions[1],
2337 CHECK_LOCATION().AsString()));
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002338 }
2339
2340 armnn::TensorInfo reshapedTensorInfo = ToTensorInfo(inputs[0]);
2341 reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
2342
James Ward58dec6b2020-09-11 17:32:44 +01002343 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002344 armnn::ReshapeDescriptor desc;
2345 desc.m_TargetShape = reshapedTensorInfo.GetShape();
2346 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2347
2348 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
2349 reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
2350
2351 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2352 }
2353 else
2354 {
2355 // register the input connection slot for the layer
2356 // only the tensors for the inputs are relevant, exclude the const tensors
2357 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2358 }
2359
Sadik Armagand109a4d2020-07-28 10:42:13 +01002360 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002361 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2362
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002363 // we need to add the activation layer and fortunately we don't need to care about the data layout
2364 armnn::IConnectableLayer* fusedActivationLayer = AddFusedActivationLayer(layer, 0,
2365 options->fused_activation_function);
Narumol Prangnawarat501f4d42019-04-24 15:52:20 +01002366
Sadik Armagan8853c1f2018-10-22 09:04:18 +01002367 // register the output connection slots for the layer, connections are made after all layers have been created
2368 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2369 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
2370}
2371
// Converts the custom TfLite DETECTION_POSTPROCESS operator (SSD NMS
// post-processing) into an ArmNN DetectionPostProcess layer. Operator
// parameters are read from the flexbuffer-encoded custom options; the
// anchors (input 2) must be a constant tensor. The four output shapes are
// not stored in the model and are derived from max_detections and
// max_classes_per_detection instead.
void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];

    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
    CHECK_VALID_SIZE(outputs.size(), 4);

    // Obtain custom options from flexbuffers
    auto custom_options = operatorPtr->custom_options;
    const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();

    // Obtain descriptor information from tf lite
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections = m["max_detections"].AsUInt32();
    desc.m_MaxClassesPerDetection = m["max_classes_per_detection"].AsUInt32();
    desc.m_NmsScoreThreshold = m["nms_score_threshold"].AsFloat();
    desc.m_NmsIouThreshold = m["nms_iou_threshold"].AsFloat();
    desc.m_NumClasses = m["num_classes"].AsUInt32();
    desc.m_ScaleH = m["h_scale"].AsFloat();
    desc.m_ScaleW = m["w_scale"].AsFloat();
    desc.m_ScaleX = m["x_scale"].AsFloat();
    desc.m_ScaleY = m["y_scale"].AsFloat();

    // These two options are optional in the flexbuffer map; the descriptor
    // keeps its default when they are absent.
    if (!(m["use_regular_nms"].IsNull()))
    {
        desc.m_UseRegularNms = m["use_regular_nms"].AsBool();
    }
    if (!(m["detections_per_class"].IsNull()))
    {
        desc.m_DetectionsPerClass = m["detections_per_class"].AsUInt32();
    }

    // IoU threshold must lie in (0, 1].
    if (desc.m_NmsIouThreshold <= 0.0f || desc.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessTFLiteParser: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // The anchors tensor must be constant; it becomes part of the layer itself.
    armnn::TensorInfo anchorTensorInfo = ToTensorInfo(inputs[2]);
    auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
                                                 armnn::Optional<armnn::PermutationVector&>());

    auto layerName = fmt::format("DetectionPostProcess:{}:{}", subgraphIndex, operatorIndex);
    IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
                                                                       layerName.c_str());

    ARMNN_ASSERT(layer != nullptr);

    // The model does not specify the output shapes.
    // The output shapes are calculated from the max_detection and max_classes_per_detection.
    unsigned int numDetectedBox = desc.m_MaxDetections * desc.m_MaxClassesPerDetection;
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });  // detection boxes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });     // detection classes
    m_OverridenOutputShapes.push_back({ 1, numDetectedBox });     // detection scores
    m_OverridenOutputShapes.push_back({ 1 });                     // number of detections

    for (unsigned int i = 0 ; i < outputs.size() ; ++i)
    {
        armnn::TensorInfo detectionBoxOutputTensorInfo = ToTensorInfo(outputs[i], m_OverridenOutputShapes[i]);
        layer->GetOutputSlot(i).SetTensorInfo(detectionBoxOutputTensorInfo);
    }

    // Register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});

    // Register the output connection slots for the layer, connections are made after all layers have been created
    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
                                                              outputTensorIndexes[1],
                                                              outputTensorIndexes[2],
                                                              outputTensorIndexes[3]});
}
2449
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002450/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
2451void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
2452{
2453 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2454
2455 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2456 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2457 CHECK_VALID_SIZE(outputs.size(), 1);
2458
2459 if (inputs.size() < 1)
2460 {
2461 throw ParseException("Pack must have at least one input.");
2462 }
2463
2464 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2465 const auto* options = operatorPtr->builtin_options.AsPackOptions();
2466
2467 StackDescriptor desc;
2468 desc.m_Axis = static_cast<uint32_t>(options->axis);
2469 desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
2470
2471 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2472 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
2473 desc.m_InputShape = inputTensorInfo.GetShape();
2474
James Ward58dec6b2020-09-11 17:32:44 +01002475 auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002476 IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
2477
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002478 ARMNN_ASSERT(layer != nullptr);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002479
Sadik Armagand109a4d2020-07-28 10:42:13 +01002480 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
Matthew Jacksonbcca1f42019-07-16 11:39:21 +01002481 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2482
2483 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2484 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2485
2486 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2487 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2488}
2489
Nina Drozd200e3802019-04-15 09:47:39 +01002490void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
2491{
2492 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2493
2494 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2495 const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
2496
2497 // This unpackAxis indicates the axis to unpack
2498 const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
2499
2500 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2501 CHECK_VALID_SIZE(inputs.size(), 1);
2502
2503 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002504
2505 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2506 {
2507 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002508 fmt::format("The unpack axis: {} cannot be greater than or equal to "
2509 "the number of input dimension {} {}",
2510 unpackAxis,
2511 inputTensorInfo.GetNumDimensions(),
2512 CHECK_LOCATION().AsString()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002513 }
2514
Nina Drozd200e3802019-04-15 09:47:39 +01002515 unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
2516 // If num is not defined, automatically infer from the length of the dimension axis.
2517 if(unpackNum == 0)
2518 {
2519 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2520 }
2521
2522 // If unpack number cannot be inferred and is still zero, throw ParseException.
2523 if(unpackNum == 0)
2524 {
2525 throw ParseException("Number to unpack must greater than zero.");
2526 }
2527
2528 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2529 CHECK_VALID_SIZE(outputs.size(), unpackNum);
2530
2531 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2532 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2533
2534 // Add current input shape to unpackDimSizes
2535 for (unsigned int i = 0; i < inputDimSize; ++i)
2536 {
2537 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2538 }
2539
2540 if (unpackDimSizes[unpackAxis] != unpackNum)
2541 {
2542 throw ParseException("Number to unpack must be the same as length of the dimension to "
2543 "unpack along.");
2544 }
2545
2546 unpackDimSizes[unpackAxis] /= unpackNum;
2547
2548 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2549 for (unsigned int j = 0; j < unpackNum; ++j)
2550 {
2551 // Set the size of the views.
2552 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2553 {
2554 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2555 }
2556 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
2557 }
2558
James Ward58dec6b2020-09-11 17:32:44 +01002559 auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd200e3802019-04-15 09:47:39 +01002560 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002561 ARMNN_ASSERT(layer != nullptr);
Nina Drozd200e3802019-04-15 09:47:39 +01002562
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002563 TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
2564 unpackDimSizes.data());
2565
Nina Drozd200e3802019-04-15 09:47:39 +01002566 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2567 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2568
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002569 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
2570 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2571 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002572 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[k], true);
James Ward58dec6b2020-09-11 17:32:44 +01002573 std::string reshapeLayerName = fmt::format("Reshape_for:{}", layer->GetName());
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002574 armnn::ReshapeDescriptor desc;
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002575 desc.m_TargetShape = outputTensorInfo.GetShape();
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002576 armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
2577
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002578 layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(splitOutShape,
2579 outputTensorInfo.GetDataType(),
2580 outputTensorInfo.GetQuantizationScale(),
2581 outputTensorInfo.GetQuantizationOffset()));
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002582 layer->GetOutputSlot(k).Connect(reshapeLayer->GetInputSlot(0));
2583
Narumol Prangnawarat2c526462019-10-21 14:58:26 +01002584 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
Narumol Prangnawarat672de572019-04-23 15:28:06 +01002585
2586 uint32_t reshapedOutputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[k]);
2587 armnn::IOutputSlot* slot = &(reshapeLayer->GetOutputSlot(0));
2588 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
2589 }
Nina Drozd200e3802019-04-15 09:47:39 +01002590}
2591
Nina Drozd0324f482019-04-08 10:52:10 +01002592void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
2593{
2594 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2595
2596 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2597 const auto * options = operatorPtr->builtin_options.AsSplitOptions();
2598
2599 const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
2600
Nina Drozd200e3802019-04-15 09:47:39 +01002601 // If number of splits cannot be inferred and is zero, throw ParseException.
2602 if(numSplits == 0)
2603 {
2604 throw ParseException("Number to splits must greater than zero.");
2605 }
2606
Nina Drozd0324f482019-04-08 10:52:10 +01002607 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2608 CHECK_VALID_SIZE(inputs.size(), 2);
2609 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2610 CHECK_VALID_SIZE(outputs.size(), numSplits);
2611
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002612 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
2613 armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
Nina Drozd0324f482019-04-08 10:52:10 +01002614
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002615 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
2616 std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
2617 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2618
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002619 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002620 const unsigned int splitDim = axisData[0];
Nina Drozd0324f482019-04-08 10:52:10 +01002621
Nina Drozd0324f482019-04-08 10:52:10 +01002622 auto inputDimSize = inputTensorInfo.GetNumDimensions();
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002623 if (inputDimSize > MaxNumOfTensorDimensions)
Nina Drozd0324f482019-04-08 10:52:10 +01002624 {
2625 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002626 fmt::format("The number of dimensions: {} for input tensors of the split op cannot be greater than {} {}",
2627 inputTensorInfo.GetNumDimensions(),
2628 MaxNumOfTensorDimensions,
2629 CHECK_LOCATION().AsString()));
Nina Drozd0324f482019-04-08 10:52:10 +01002630 }
2631
2632 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2633
2634 // Add current input shape to splitterDimSizes
2635 for (unsigned int i = 0; i < inputDimSize; ++i)
2636 {
2637 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2638 }
2639
2640 if (splitterDimSizes[splitDim] % numSplits != 0)
2641 {
2642 throw ParseException("Number of splits must evenly divide the dimension");
2643 }
2644 splitterDimSizes[splitDim] /= numSplits;
2645
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002646 SplitterDescriptor splitDesc(numSplits, inputDimSize);
Nina Drozd0324f482019-04-08 10:52:10 +01002647 for (unsigned int j = 0; j < numSplits; ++j)
2648 {
2649 // Set the size of the views.
2650 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2651 {
2652 splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2653 }
2654 splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
2655 }
2656
James Ward58dec6b2020-09-11 17:32:44 +01002657 auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
Nina Drozd0324f482019-04-08 10:52:10 +01002658 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002659 ARMNN_ASSERT(layer != nullptr);
Nina Drozd0324f482019-04-08 10:52:10 +01002660
2661 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
Narumol Prangnawarat17660e62019-04-18 16:56:19 +01002662 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
Nina Drozd0324f482019-04-08 10:52:10 +01002663
Nina Drozd0324f482019-04-08 10:52:10 +01002664 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2665 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002666 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Francis Murtagh98d6b3d2019-10-21 10:52:54 +01002667 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
Nina Drozd0324f482019-04-08 10:52:10 +01002668 }
2669
2670 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2671 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2672}
2673
Derek Lambertif0176992020-04-28 13:37:49 +01002674unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
2675{
2676 int numDims = armnn::numeric_cast<int>(numDimsIn);
2677 int v = idx < 0 ? numDims + idx : idx;
2678 ARMNN_ASSERT(v >= 0);
2679 ARMNN_ASSERT(v < numDims);
2680
2681 return static_cast<unsigned int>(v);
2682}
2683
2684void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
2685{
2686 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2687
2688 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
Ryan OShea86704732020-05-26 11:41:04 +01002689 const auto * options = operatorPtr->builtin_options.AsSplitVOptions();
Derek Lambertif0176992020-04-28 13:37:49 +01002690
2691 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2692 CHECK_VALID_SIZE(inputs.size(), 3);
2693
2694 auto& inputTensor = inputs[0];
2695 auto& splitsTensor = inputs[1];
2696 auto& axisTensor = inputs[2];
2697
2698 armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
2699 armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
2700 armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
2701 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
2702
2703 // Inputs
2704 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2705 if (inputDimSize > MaxNumOfTensorDimensions)
2706 {
2707 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002708 fmt::format("The number of dimensions: {} for input tensors of the "
2709 "SplitV op cannot be greater than {} {}",
2710 inputTensorInfo.GetNumDimensions(),
2711 MaxNumOfTensorDimensions,
2712 CHECK_LOCATION().AsString()));
Derek Lambertif0176992020-04-28 13:37:49 +01002713 }
2714
2715 // Get split axis
2716 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
2717 std::vector<int> axisData(axisTensorInfo.GetNumElements());
2718 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
2719 const unsigned int splitDim = ComputeWrappedIndex(axisData[0], inputTensorInfo.GetNumDimensions());
2720
Derek Lambertif0176992020-04-28 13:37:49 +01002721 // Set split sizes
Derek Lambertif0176992020-04-28 13:37:49 +01002722 CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
Ryan OShea86704732020-05-26 11:41:04 +01002723 unsigned int numSplits{0};
2724
2725 if(options)
Derek Lambertif0176992020-04-28 13:37:49 +01002726 {
2727 numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
Derek Lambertif0176992020-04-28 13:37:49 +01002728 }
2729 else
2730 {
Ryan OShea86704732020-05-26 11:41:04 +01002731 numSplits = splitsInfo.GetNumElements();
Derek Lambertif0176992020-04-28 13:37:49 +01002732 }
2733
2734 if (numSplits <=0)
2735 {
2736 throw ParseException("SplitV has invalid number of splits");
2737 }
2738
Jan Eilersc0761e92020-06-29 16:48:44 +01002739 std::vector<int> splitsData(numSplits);
Ryan OShea86704732020-05-26 11:41:04 +01002740 BufferRawPtr splitsBufferPtr = GetBuffer(m_Model, splitsTensor->buffer);
Jan Eilersc0761e92020-06-29 16:48:44 +01002741 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.GetNumBytes());
Ryan OShea86704732020-05-26 11:41:04 +01002742
Jan Eilersc0761e92020-06-29 16:48:44 +01002743 unsigned int idx = 0;
Ryan OShea86704732020-05-26 11:41:04 +01002744 int numInferred{0};
2745 unsigned int inferIdx{0};
2746 int splitSum{0};
2747 for (auto split : splitsData)
2748 {
2749 if (split < 0)
2750 {
2751 numInferred++;
2752 inferIdx = idx;
2753 }
2754 else
2755 {
2756 splitSum += split;
2757 }
2758 idx++;
2759 }
2760 // Check for inferred Axis
2761 if (numInferred == 0)
2762 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002763 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
Ryan OShea86704732020-05-26 11:41:04 +01002764 {
2765 throw ParseException("SplitV split_sizes does not sum to the dimension of value along split_dim.");
2766 }
2767 }
2768 else if (numInferred == 1)
2769 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002770 splitsData[inferIdx] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
Ryan OShea86704732020-05-26 11:41:04 +01002771 }
2772 else
2773 {
2774 throw ParseException("Cannot infer split size for more than one split");
2775 }
2776
Derek Lambertif0176992020-04-28 13:37:49 +01002777 //Ouput size validation
2778 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2779 CHECK_VALID_SIZE(outputs.size(), numSplits);
2780
2781 // Setup Armnn descriptor
2782 SplitterDescriptor splitDesc(numSplits, inputDimSize);
2783 unsigned int accumSplit = 0;
2784 for (unsigned int j = 0; j < numSplits; ++j)
2785 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002786 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsData[j]);
Derek Lambertif0176992020-04-28 13:37:49 +01002787
2788 // Set the size of the views.
2789 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
2790 {
2791 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
2792 if (dimIdx == splitDim)
2793 {
2794 dimSize = splitSize;
2795 }
2796 splitDesc.SetViewSize(j, dimIdx, dimSize);
2797 }
2798
2799 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
2800 accumSplit += splitSize;
2801 }
2802
James Ward58dec6b2020-09-11 17:32:44 +01002803 auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
Derek Lambertif0176992020-04-28 13:37:49 +01002804 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
James Conroy05102392020-06-24 15:39:55 +01002805 ARMNN_ASSERT(layer != nullptr);
Derek Lambertif0176992020-04-28 13:37:49 +01002806
2807 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2808 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2809
2810 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
2811 {
Sadik Armagand109a4d2020-07-28 10:42:13 +01002812 armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
Derek Lambertif0176992020-04-28 13:37:49 +01002813 layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
2814 }
2815
2816 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2817 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2818}
2819
Inki Daed4619e22020-09-10 15:33:54 +09002820void TfLiteParser::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
2821{
2822 const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2823 const auto *options = operatorPtr->builtin_options.AsArgMaxOptions();
2824
2825 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2826 auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
2827 CHECK_VALID_SIZE(inputs.size(), 2);
2828
2829 auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
2830 CHECK_VALID_SIZE(outputs.size(), 1);
2831
James Ward58dec6b2020-09-11 17:32:44 +01002832 auto layerName = fmt::format("ArgMax:{}:{}", subgraphIndex, operatorIndex);
Inki Daed4619e22020-09-10 15:33:54 +09002833
2834 armnn::TensorInfo sizeTensorInfo0 = ToTensorInfo(inputs[0]);
2835 armnn::TensorInfo sizeTensorInfo1 = ToTensorInfo(inputs[1]);
2836
2837 // Get const axis value from model and set it to descriptor.
2838 BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
2839
2840 ArgMinMaxDescriptor desc;
2841 desc.m_Axis = axisBufferPtr->data.data()[0];
2842 // If output_type is int32 then set Signed32 else Signed64. Default type is Signed64.
2843 desc.m_Output_Type = options->output_type == 3 ? armnn::DataType::Signed32 : armnn::DataType::Signed64;
2844 desc.m_Function = ArgMinMaxFunction::Max;
2845
2846 // Register a ArgMax layer.
2847 IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerName.c_str());
2848
2849 armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
2850 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2851
2852 // Register input tensor to the layer.
2853 auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2854 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2855
2856 // Register output tensor to the layer.
2857 auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2858 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2859}
2860
Sadik Armagan58f39192018-09-17 14:14:39 +01002861armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
2862 unsigned int outputSlot,
2863 tflite::ActivationFunctionType activationType)
telsoa01c577f2c2018-08-31 09:22:23 +01002864{
2865 ActivationDescriptor activationDesc;
2866 std::string layerName = prevLayer->GetName();
2867
2868 switch(activationType)
2869 {
2870 case tflite::ActivationFunctionType_NONE:
2871 {
2872 // this is a no-op: return previous layer
2873 return prevLayer;
2874 }
2875 case tflite::ActivationFunctionType_RELU:
2876 {
2877 activationDesc.m_Function = ActivationFunction::ReLu;
2878 layerName += ":RELU";
2879 break;
2880 }
2881 case tflite::ActivationFunctionType_RELU6:
2882 {
2883 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2884 activationDesc.m_A = 6.0f;
2885 activationDesc.m_B = 0.0f;
2886 layerName += ":RELU6";
2887 break;
2888 }
2889 case tflite::ActivationFunctionType_TANH:
2890 {
2891 activationDesc.m_Function = ActivationFunction::TanH;
2892 activationDesc.m_A = 1.0f;
2893 activationDesc.m_B = 1.0f;
2894 layerName += ":TANH";
2895 break;
2896 }
2897
2898 // I only put these here as a reminder what others we could support
2899 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2900 case tflite::ActivationFunctionType_SIGN_BIT:
2901 default:
2902 {
2903 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002904 fmt::format("TfLite parser doesn't suppport fused activation: "
2905 "{}/{} {} ",
2906 activationType,
2907 tflite::EnumNameActivationFunctionType(activationType),
2908 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002909
2910 }
2911 }
2912
2913 IConnectableLayer* activationLayer =
2914 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2915
2916 auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
2917 prevOutputSlot.Connect(activationLayer->GetInputSlot(0));
2918 activationLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
2919 return activationLayer;
2920}
2921
2922TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
2923{
2924 if (fileName == nullptr)
2925 {
James Ward58dec6b2020-09-11 17:32:44 +01002926 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
telsoa01c577f2c2018-08-31 09:22:23 +01002927 CHECK_LOCATION().AsString()));
2928 }
Francis Murtagh532a29d2020-06-29 11:50:01 +01002929 std::error_code errorCode;
2930 fs::path pathToFile(fileName);
2931 if (!fs::exists(pathToFile, errorCode))
telsoa01c577f2c2018-08-31 09:22:23 +01002932 {
James Ward58dec6b2020-09-11 17:32:44 +01002933 //fmt::format() could not be used here (format error)
2934 std::stringstream msg;
2935 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
2936 << " " << CHECK_LOCATION().AsString();
2937
2938 throw FileNotFoundException(msg.str());
telsoa01c577f2c2018-08-31 09:22:23 +01002939 }
2940 std::ifstream file(fileName, std::ios::binary);
2941 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2942 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2943 fileContent.size());
2944}
2945
2946TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
2947{
2948 if (binaryContent == nullptr)
2949 {
James Ward58dec6b2020-09-11 17:32:44 +01002950 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
telsoa01c577f2c2018-08-31 09:22:23 +01002951 CHECK_LOCATION().AsString()));
2952 }
2953 flatbuffers::Verifier verifier(binaryContent, len);
2954 if (verifier.VerifyBuffer<tflite::Model>() == false)
2955 {
2956 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002957 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
2958 "flatbuffers format. size:{} {}",
2959 len,
2960 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002961 }
2962 return tflite::UnPackModel(binaryContent);
2963}
2964
2965TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
2966 size_t subgraphIndex,
2967 size_t operatorIndex)
2968{
2969 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2970
Derek Lambertiff05cc52019-04-26 13:05:17 +01002971 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2972 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002973
2974 size_t inputCount = operatorPtr->inputs.size();
2975 TensorRawPtrVector result(inputCount);
2976 for (size_t i=0; i<inputCount; ++i)
2977 {
2978 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002979 result[i] = subgraphPtr->tensors[inputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01002980 }
2981 return result;
2982}
2983
2984TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
2985 size_t subgraphIndex,
2986 size_t operatorIndex)
2987{
2988 CHECK_MODEL(model, subgraphIndex, operatorIndex);
2989
Derek Lambertiff05cc52019-04-26 13:05:17 +01002990 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2991 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01002992
2993 size_t outputCount = operatorPtr->outputs.size();
2994 TensorRawPtrVector result(outputCount);
2995 for (size_t i=0; i<outputCount; ++i)
2996 {
2997 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2998 CHECK_TENSOR(model, subgraphIndex, outputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01002999 result[i] = subgraphPtr->tensors[outputId].get();
telsoa01c577f2c2018-08-31 09:22:23 +01003000 }
3001 return result;
3002}
3003
3004TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
3005 size_t subgraphIndex)
3006{
3007 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003008 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003009
Derek Lambertiff05cc52019-04-26 13:05:17 +01003010 size_t inputCount = subgraphPtr->inputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003011 TensorIdRawPtrVector result(inputCount);
3012 for (size_t i=0; i<inputCount; ++i)
3013 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003014 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
telsoa01c577f2c2018-08-31 09:22:23 +01003015 CHECK_TENSOR(model, subgraphIndex, inputId);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003016 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003017 }
3018 return result;
3019}
3020
3021TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
3022 size_t subgraphIndex)
3023{
3024 CHECK_SUBGRAPH(model, subgraphIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003025 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003026
Derek Lambertiff05cc52019-04-26 13:05:17 +01003027 size_t outputCount = subgraphPtr->outputs.size();
telsoa01c577f2c2018-08-31 09:22:23 +01003028 TensorIdRawPtrVector result(outputCount);
3029 for (size_t i=0; i<outputCount; ++i)
3030 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003031 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3032 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
telsoa01c577f2c2018-08-31 09:22:23 +01003033 }
3034 return result;
3035}
3036
3037std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
3038 size_t subgraphIndex,
3039 size_t operatorIndex)
3040{
3041 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003042 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3043 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003044 return operatorPtr->inputs;
3045}
3046
3047std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
3048 size_t subgraphIndex,
3049 size_t operatorIndex)
3050{
3051 CHECK_MODEL(model, subgraphIndex, operatorIndex);
Derek Lambertiff05cc52019-04-26 13:05:17 +01003052 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3053 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
telsoa01c577f2c2018-08-31 09:22:23 +01003054 return operatorPtr->outputs;
3055}
3056
3057void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
3058 size_t operatorIndex,
3059 IConnectableLayer* layer,
3060 const std::vector<unsigned int>& tensorIndexes)
3061{
3062 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003063 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003064 if (tensorIndexes.size() != layer->GetNumInputSlots())
3065 {
3066 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003067 fmt::format("The number of tensor inputs ({}) does not match the number expected ({})"
3068 " for subgraph:{} operator index:{} {}",
3069 tensorIndexes.size(),
3070 layer->GetNumInputSlots(),
3071 subgraphIndex,
3072 operatorIndex,
3073 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003074 }
3075
3076 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
3077 {
3078 unsigned int tensorIndex = tensorIndexes[slotIndex];
3079 armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
3080 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
3081 }
3082}
3083
3084void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
3085 size_t operatorIndex,
3086 IConnectableLayer* layer,
3087 const std::vector<unsigned int>& tensorIndexes)
3088{
3089 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003090 ARMNN_ASSERT(layer != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01003091 if (tensorIndexes.size() != layer->GetNumOutputSlots())
3092 {
3093 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003094 fmt::format("The number of tensor outputs ({}) does not match the number expected ({})"
3095 " for subgraph:{} operator index:{} {}",
3096 tensorIndexes.size(),
3097 layer->GetNumOutputSlots(),
3098 subgraphIndex,
3099 operatorIndex,
3100 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003101 }
3102
3103 for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
3104 {
3105 unsigned int tensorIndex = tensorIndexes[slotIndex];
3106 armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
3107 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
3108 }
3109}
3110
3111void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
3112{
3113 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3114
3115 auto inputs = GetSubgraphInputs(m_Model, subgraphIndex);
3116 for (auto const & tensorIdAndPtr : inputs)
3117 {
3118 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3119 IConnectableLayer* layer =
3120 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3121
3122 auto tensorInfo = ToTensorInfo(tensorIdAndPtr.second);
3123 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3124
3125 RegisterOutputSlots(subgraphIndex,
3126 VIRTUAL_OPERATOR_ID,
3127 layer,
3128 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3129 }
3130}
3131
3132void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
3133{
3134 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3135
3136 auto outputs = GetSubgraphOutputs(m_Model, subgraphIndex);
3137 for (auto const & tensorIdAndPtr : outputs)
3138 {
3139 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3140 IConnectableLayer* layer =
3141 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3142
3143 RegisterInputSlots(subgraphIndex,
3144 VIRTUAL_OPERATOR_ID,
3145 layer,
3146 { static_cast<uint32_t>(tensorIdAndPtr.first) });
3147 }
3148}
3149
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003150void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
3151{
3152 CHECK_SUBGRAPH(m_Model, subgraphIndex);
3153
Derek Lambertiff05cc52019-04-26 13:05:17 +01003154 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003155 for (unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
3156 {
3157 for (unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
3158 {
3159 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot == nullptr &&
3160 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
3161 {
Derek Lambertiff05cc52019-04-26 13:05:17 +01003162 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003163 armnn::TensorInfo tensorInfo = ToTensorInfo(tensorPtr);
3164 auto tensorAndData = CreateConstTensor(tensorPtr,
3165 tensorInfo,
3166 armnn::Optional<armnn::PermutationVector&>());
3167
James Ward58dec6b2020-09-11 17:32:44 +01003168 std::string layerName = fmt::format("Constant:{}", tensorPtr->name);
Bruno Goncalves3d7efe92018-12-27 14:21:43 -02003169 IConnectableLayer *layer =
3170 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
3171
3172 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
3173 RegisterOutputSlots(subgraphIndex,
3174 VIRTUAL_OPERATOR_ID,
3175 layer,
3176 { tensorIndex });
3177
3178 }
3179 }
3180 }
3181}
3182
telsoa01c577f2c2018-08-31 09:22:23 +01003183// example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
3184TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
3185{
3186 CHECK_BUFFER(model, bufferIndex);
3187 return model->buffers[bufferIndex].get();
3188}
3189
Matteo Martincigh747ef822018-12-18 09:26:39 +00003190template<typename T>
3191std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
3192TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
3193 TfLiteParser::TensorRawPtr tensorPtr,
3194 armnn::TensorInfo& tensorInfo,
3195 armnn::Optional<armnn::PermutationVector&> permutationVector)
3196{
3197 auto constData = CreateConstTensorImpl<T>(bufferPtr,
3198 tensorPtr,
3199 tensorInfo,
3200 permutationVector);
3201 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
3202 return std::make_pair(constData.first, std::move(storage));
3203}
3204
telsoa01c577f2c2018-08-31 09:22:23 +01003205std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
3206TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
Matteo Martincigh747ef822018-12-18 09:26:39 +00003207 armnn::TensorInfo& tensorInfo,
3208 armnn::Optional<armnn::PermutationVector&> permutationVector)
telsoa01c577f2c2018-08-31 09:22:23 +01003209{
3210 CHECK_TENSOR_PTR(tensorPtr);
3211 auto bufferPtr = GetBuffer(m_Model, tensorPtr->buffer);
3212 CHECK_BUFFER_SIZE(bufferPtr, tensorInfo, tensorPtr->buffer);
3213
3214 switch (tensorInfo.GetDataType())
3215 {
3216 case armnn::DataType::Float32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003217 return CreateConstTensorAndStoreData<float>(bufferPtr,
3218 tensorPtr,
3219 tensorInfo,
3220 permutationVector);
Derek Lambertif90c56d2020-01-10 17:14:08 +00003221 case armnn::DataType::QAsymmU8:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003222 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
3223 tensorPtr,
3224 tensorInfo,
3225 permutationVector);
Keith Davisd305e1a2020-01-22 11:57:54 +00003226 case armnn::DataType::QSymmS8:
3227 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3228 tensorPtr,
3229 tensorInfo,
3230 permutationVector);
Keith Davis67e6c542020-02-19 10:08:33 +00003231 case armnn::DataType::QAsymmS8:
3232 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3233 tensorPtr,
3234 tensorInfo,
3235 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003236 case armnn::DataType::Signed32:
Matteo Martincigh747ef822018-12-18 09:26:39 +00003237 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
3238 tensorPtr,
3239 tensorInfo,
3240 permutationVector);
telsoa01c577f2c2018-08-31 09:22:23 +01003241 default:
3242 {
3243 std::stringstream errString;
3244 errString << "Unexpected datatype when creating const tensor: "
3245 << armnn::GetDataTypeName(tensorInfo.GetDataType())
3246 << " shape:" << tensorInfo.GetShape()
3247 << CHECK_LOCATION().AsString();
3248 throw ParseException(errString.str());
3249 }
3250 }
3251}
3252
3253BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
3254 const std::string& name) const
3255{
3256 CHECK_SUBGRAPH(m_Model, subgraphId);
3257 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3258 for (auto const & input : inputs)
3259 {
3260 if (input.second->name == name)
3261 {
3262 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3263 return std::make_pair(bindingId, ToTensorInfo(input.second));
3264 }
3265 }
3266
3267 std::stringstream bindings;
3268 for (auto const & input : inputs)
3269 {
3270 bindings << "'" << input.second->name << "' ";
3271 }
3272
3273 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003274 fmt::format("No input binding found for subgraph:{} and name:{}. "
3275 "Possible inputs are: [{}] {}",
3276 subgraphId,
3277 name,
3278 bindings.str(),
3279 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003280}
3281
3282BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
3283 const std::string& name) const
3284{
3285 CHECK_SUBGRAPH(m_Model, subgraphId);
3286 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003287 for (unsigned int i = 0; i < outputs.size(); ++i)
telsoa01c577f2c2018-08-31 09:22:23 +01003288 {
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003289 auto const output = outputs[i];
telsoa01c577f2c2018-08-31 09:22:23 +01003290 if (output.second->name == name)
3291 {
3292 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
Narumol Prangnawarat4628d052019-02-25 17:26:05 +00003293 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3294 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3295 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
telsoa01c577f2c2018-08-31 09:22:23 +01003296 }
3297 }
3298
3299 std::stringstream bindings;
3300 for (auto const & output : outputs)
3301 {
3302 bindings << "'" << output.second->name << "' ";
3303 }
3304
3305 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003306 fmt::format("No output binding found for subgraph:{} and name:{}. "
3307 "Possible outputs are: [{}] {}",
3308 subgraphId,
3309 name,
3310 bindings.str(),
3311 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01003312}
3313
// Returns the number of subgraphs contained in the parsed TfLite model.
size_t TfLiteParser::GetSubgraphCount() const
{
    return m_Model->subgraphs.size();
}
3318
3319std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
3320{
3321 CHECK_SUBGRAPH(m_Model, subgraphId);
3322 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3323 std::vector<std::string> result;
3324 result.reserve(inputs.size());
3325 for (auto const & input : inputs)
3326 {
3327 result.push_back(input.second->name);
3328 }
3329 return result;
3330}
3331
3332std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
3333{
3334 CHECK_SUBGRAPH(m_Model, subgraphId);
3335 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3336 std::vector<std::string> result;
3337 result.reserve(outputs.size());
3338 for (auto const & output : outputs)
3339 {
3340 result.push_back(output.second->name);
3341 }
3342 return result;
3343}
3344
// Creates a parser on the heap and returns an owning raw pointer; the caller
// is responsible for releasing it via ITfLiteParser::Destroy.
ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
{
    return new TfLiteParser(options);
}
3349
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003350ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
telsoa01c577f2c2018-08-31 09:22:23 +01003351{
Aron Virginas-Tarc975f922019-10-23 17:38:17 +01003352 return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
telsoa01c577f2c2018-08-31 09:22:23 +01003353}
3354
// Deletes a parser previously created with CreateRaw/Create. Used as the
// deleter of ITfLiteParserPtr.
void ITfLiteParser::Destroy(ITfLiteParser* parser)
{
    delete parser;
}
3359
// Takes ownership of float constant-tensor data; all other typed storage
// members are left empty.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
: m_FloatData(std::move(data))
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
3367
// Takes ownership of uint8 constant-tensor data; all other typed storage
// members are left empty.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(std::move(data))
, m_Int8Data(nullptr)
, m_Int32Data(nullptr)
{
}
3375
// Takes ownership of int8 constant-tensor data (used for both QSymmS8 and
// QAsymmS8 tensors); all other typed storage members are left empty.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(std::move(data))
, m_Int32Data(nullptr)
{
}
3383
// Takes ownership of int32 constant-tensor data; all other typed storage
// members are left empty.
TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
: m_FloatData(nullptr)
, m_Uint8Data(nullptr)
, m_Int8Data(nullptr)
, m_Int32Data(std::move(data))
{
}
3391
3392} // armnnTfLiteParser