//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TfParser.hpp"

#include <armnn/TypesUtils.hpp>
#include <armnn/Descriptors.hpp>

#include <armnnUtils/Permute.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <GraphTopologicalSort.hpp>
#include <ParserHelper.hpp>

#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>

#include <tensorflow/core/framework/graph.pb.h>

#include <fmt/core.h>
#include <fmt/format.h>
#include <numeric>

using namespace armnnUtils;
using namespace armnn;

namespace armnnTfParser
{
namespace
{

const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };


template <typename Callable>
void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
                                    const std::string& attribName,
                                    tensorflow::AttrValue::ValueCase expectedValueCase,
                                    Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            throw ParseException(
                fmt::format("Attribute {} of node {} expected to have {} as tensorflow::AttrValue::ValueCase, "
                            "but found {} instead {}",
                            attribName,
                            nodeDef.name(),
                            static_cast<int>(expectedValueCase),
                            static_cast<int>(attrValue.value_case()),
                            CHECK_LOCATION().AsString()));
        }
    }
    else
    {
        throw ParseException(
            fmt::format("Could not find required attribute {} in node {} {}",
                        attribName,
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }
}

template <typename Callable>
void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
                                   const std::string& attribName,
                                   tensorflow::AttrValue::ValueCase expectedValueCase,
                                   Callable callable)
{
    auto iter = nodeDef.attr().find(attribName);
    if (iter != nodeDef.attr().end())
    {
        const auto& attrValue = iter->second;
        if (attrValue.value_case() == expectedValueCase)
        {
            callable(attrValue);
        }
        else
        {
            throw ParseException(
                fmt::format("Attribute {} of node {} expected to have {} as tensorflow::AttrValue::ValueCase, "
                            "but found {} instead {}",
                            attribName,
                            nodeDef.name(),
                            static_cast<int>(expectedValueCase),
                            static_cast<int>(attrValue.value_case()),
                            CHECK_LOCATION().AsString()));
        }
    }
}

float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    float attribValue = 0.0f;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
                                   [&attribValue](const tensorflow::AttrValue& attrValue)
                                   {
                                       attribValue = attrValue.f();
                                   });
    return attribValue;
}

int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    int32_t attribValue = 0;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
                                   [&attribValue](const tensorflow::AttrValue& attrValue)
                                   {
                                       attribValue = static_cast<int32_t>(attrValue.i());
                                   });
    return attribValue;
}

bool ReadMandatoryNodeBoolAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    bool attribValue = false;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
                                   [&attribValue](const tensorflow::AttrValue& attrValue)
                                   {
                                       attribValue = static_cast<bool>(attrValue.b());
                                   });
    return attribValue;
}

uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    uint32_t attribValue = 0u;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
                                   [&attribValue](const tensorflow::AttrValue& attrValue)
                                   {
                                       attribValue = static_cast<uint32_t>(attrValue.i());
                                   });
    return attribValue;
}

std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    std::string attribValue = "";
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
                                   [&attribValue](const tensorflow::AttrValue& attrValue)
                                   {
                                       attribValue = attrValue.s();
                                   });
    return attribValue;
}

std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
                                                           const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
                                   [&attriList](const tensorflow::AttrValue& attrValue)
                                   {
                                       for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
                                       {
                                           attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
                                       }
                                   });

    return attriList;
}

std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
                                                          const std::string& name)
{
    std::vector<uint32_t> attriList;
    ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
                                  [&attriList](const tensorflow::AttrValue& attrValue)
                                  {
                                      for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
                                      {
                                          attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
                                      }
                                  });

    return attriList;
}

std::string ReadOptionalNodeStringAttribute(const tensorflow::NodeDef& nodeDef,
                                            const std::string& name,
                                            const std::string& defaultValue = "")
{
    std::string attribValue = defaultValue;
    ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
                                  [&attribValue](const tensorflow::AttrValue& attrValue)
                                  {
                                      attribValue = attrValue.s();
                                  });
    return attribValue;
}

bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
                                   const std::string& name,
                                   bool defaultValue = false)
{
    bool attribValue = defaultValue;
    ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
                                  [&attribValue](const tensorflow::AttrValue& attrValue)
                                  {
                                      attribValue = attrValue.b();
                                  });
    return attribValue;
}

tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
{
    tensorflow::DataType attribValue = tensorflow::DT_INVALID;
    ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
                                   [&attribValue](const tensorflow::AttrValue& attrValue)
                                   {
                                       attribValue = attrValue.type();
                                   });
    return attribValue;
}

TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
{
    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);

    if (stretchDim != targetDims.end())
    {
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            throw ParseException(
                fmt::format("At most one component of shape can be -1 {}",
                            CHECK_LOCATION().AsString()));
        }

        auto targetNumElements =
            armnn::numeric_cast<unsigned int>(
                std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
    }

    TensorInfo reshapeInfo = input;
    reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });

    return reshapeInfo;
}
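
// Illustrative note (not part of the original parser): PrepareReshape resolves a single -1 ("stretch")
// dimension from the element count of the input. Assuming an input TensorInfo of shape { 2, 3, 4 }
// (24 elements) and targetDims = { -1, 4 }, the accumulate above yields targetNumElements = 4,
// stretchIndex = 0 and outDims[0] = 24 / 4 = 6, so the returned TensorInfo has shape { 6, 4 }.
// A second -1 in targetDims triggers the ParseException. The concrete shapes are assumed for the example.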

// We need the input0Slot to guide the reshape for input1Slot.
IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
                                      INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
{
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();
    const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
    const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
    std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
    std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
    reshapedDimensions[matchDim] = input1Info.GetShape()[0];

    armnn::TensorInfo reshapedInfo = input1Info;
    reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });

    const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
    IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());

    input1Slot->Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    input1Slot = &reshapeLayer->GetOutputSlot(0);

    return input1Slot;
}
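
// Illustrative note (not part of the original parser): with isNHWC == true, a 4D input0 of shape
// [N, H, W, C] and a 1D input1 of shape [C] make matchDim select the channel axis, so input1 is reshaped
// to [1, 1, 1, C] before the elementwise layer; with isNHWC == false the same [C] tensor is reshaped to
// [1, C, 1, 1]. The shapes used here are assumptions chosen only to illustrate the broadcast logic.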

OutputId ParseOutputId(const std::string & name)
{
    unsigned int outputNum = 0;
    size_t colonPos = name.find_last_of(":");
    if (colonPos != std::string::npos)
    {
        int n = std::stoi(name.substr(colonPos+1));
        if (n<0 || n>100)
        {
            throw ParseException(
                fmt::format("Output tensor id is out of range for {} {}",
                            name,
                            CHECK_LOCATION().AsString()));
        }
        outputNum = static_cast<unsigned int>(n);
    }
    return OutputId(name.substr(0,colonPos),outputNum);
}
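
// Illustrative note (not part of the original parser): TensorFlow input names may carry an output index
// after a colon. A hypothetical "conv1/BiasAdd:1" parses to OutputId("conv1/BiasAdd", 1), while a plain
// "conv1/BiasAdd" parses to OutputId("conv1/BiasAdd", 0). Indices outside [0, 100] are rejected with a
// ParseException.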

#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            fmt::format("Unsupported data format {} passed for {} node {}. " \
                        "Only NHWC and NCHW supported {}", \
                        FORMAT, \
                        NODE_TYPE, \
                        NODE_DEF.name(), \
                        CHECK_LOCATION().AsString())); \
    }

#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            fmt::format("Only 'SAME' and 'VALID' padding supported. Got {} for {} {}", \
                        PADDING, \
                        NODE_DEF.name(), \
                        CHECK_LOCATION().AsString())); \
    } \

} // namespace

const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
    { "Const", &TfParser::ParseConst },
    { "Add", &TfParser::ParseAdd },
    { "AddN", &TfParser::ParseAddN },
    { "BiasAdd", &TfParser::ParseBiasAdd },
    { "Identity", &TfParser::ParseIdentity },
    { "Conv2D", &TfParser::ParseConv2D },
    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
    { "ExpandDims", &TfParser::ParseExpandDims },
    { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
    { "Gather", &TfParser::ParseGather},
    { "Greater", &TfParser::ParseGreater},
    { "ConcatV2", &TfParser::ParseConcat },
    { "LRN", &TfParser::ParseLrn },
    { "MatMul", &TfParser::ParseMatMul },
    { "Mean", &TfParser::ParseMean },
    { "Mul", &TfParser::ParseMul },
    { "Placeholder", &TfParser::ParsePlaceholder },
    { "RealDiv", &TfParser::ParseRealDiv },
    { "Relu", &TfParser::ParseRelu },
    { "Relu6", &TfParser::ParseRelu6 },
    { "Reshape", &TfParser::ParseReshape },
    { "ResizeBilinear", &TfParser::ParseResizeBilinear },
    { "Rsqrt", &TfParser::ParseRsqrt },
    { "Shape", &TfParser::ParseShape },
    { "Squeeze", &TfParser::ParseSqueeze },
    { "Sigmoid", &TfParser::ParseSigmoid },
    { "Softmax", &TfParser::ParseSoftmax },
    { "Softplus", &TfParser::ParseSoftplus },
    { "Split", &TfParser::ParseSplit },
    { "StridedSlice", &TfParser::ParseStridedSlice },
    { "Tanh", &TfParser::ParseTanh },
    { "MaxPool", &TfParser::ParseMaxPool },
    { "AvgPool", &TfParser::ParseAvgPool },
    { "Maximum", &TfParser::ParseMaximum },
    { "Minimum", &TfParser::ParseMinimum },
    { "Equal", &TfParser::ParseEqual },
    { "Pad", &TfParser::ParsePad },
    { "Sub", &TfParser::ParseSub },
    { "Pack" , &TfParser::ParseStack },
    { "Stack", &TfParser::ParseStack },
    { "Transpose", &TfParser::ParseTranspose },
};

const std::list<std::string> TfParser::m_ControlInputs = {
    "Assert"
};

ITfParser* ITfParser::CreateRaw()
{
    return new TfParser();
}

ITfParserPtr ITfParser::Create()
{
    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
}

void ITfParser::Destroy(ITfParser* parser)
{
    delete parser;
}

inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (samePadding) {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t temp = (outputSize - 1) * stride + filterSize;
        if (temp > inputSize) {
            *paddingFront = (temp - inputSize) / 2;
            *paddingBack = (temp - inputSize) - *paddingFront;
        }
    }
}

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 bool samePadding)
{
    CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
}
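
// Illustrative note (not part of the original parser): for SAME padding with inputSize = 7, stride = 2 and
// filterSize = 3, CalculateSamePadding computes outputSize = (7 + 2 - 1) / 2 = 4 and temp = (4 - 1) * 2 + 3 = 9,
// so the 2 missing elements are split as paddingFront = 1 and paddingBack = 1. With samePadding == false
// (VALID padding) both outputs stay 0. The concrete numbers are an assumed example, not taken from a model.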

/// An abstract base class which represents a single tensorflow operation (node)
/// that has been (potentially partially) converted to Armnn.
/// It may not yet have been fully converted into actual Armnn layers.
class ParsedTfOperation
{
public:
    ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : m_Parser(parser)
        , m_Node(node)
    {
    }

    virtual ~ParsedTfOperation() {};

    const tensorflow::NodeDef& GetNode() const { return m_Node; }

    /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
    /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;

    /// If this operation is an Identity then this will follow and return the 'parent' operation (recursively).
    virtual ParsedTfOperation* ResolveIdentityOperations()
    {
        return this;
    }

protected:
    TfParser* m_Parser;
    const tensorflow::NodeDef& m_Node;
};

/// A ParsedTfOperation where the Armnn equivalent is a single layer,
/// with output slots that correspond directly to the Tf node outputs.
class SingleLayerParsedTfOperation : public ParsedTfOperation
{
public:
    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
        : ParsedTfOperation(parser, node)
        , m_Layer(layer)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        ARMNN_ASSERT(m_Layer);
        // Assumes one-to-one mapping between Tf and armnn output slots.
        unsigned int armnnOutputSlotIdx = tfOutputIndex;
        if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
        {
            throw ParseException(
                fmt::format("The requested output slot #{} "
                            "for {} does not exist {}",
                            armnnOutputSlotIdx,
                            m_Layer->GetName(),
                            CHECK_LOCATION().AsString()));
        }
        return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
    }

protected:
    IConnectableLayer* m_Layer;
};

/// A SingleLayerParsedTfOperation for deferred layer creation.
class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
{
public:
    DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
        : SingleLayerParsedTfOperation(parser, node, nullptr)
    {
    }

    IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        if (!m_Layer)
        {
            CreateLayerDeferred();
        }
        return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
    }

private:
    virtual void CreateLayerDeferred() = 0;
};


TfParser::TfParser()
    : m_Network(nullptr, nullptr)
{
}


const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
{
    if (nodeDef->op() != "Identity")
    {
        return nodeDef;
    }

    if (nodeDef->input_size() != 1)
    {
        throw ParseException(
            fmt::format("Identity node should have a single input! {} has {} inputs {}",
                        nodeDef->name(),
                        nodeDef->input_size(),
                        CHECK_LOCATION().AsString()));
    }

    auto it = m_NodesByName.find(nodeDef->input(0));
    if (it != m_NodesByName.end())
    {
        const tensorflow::NodeDef* inputNode = it->second;
        return ResolveIdentityNode(inputNode);
    }
    else
    {
        throw ParseException(
            fmt::format("Cannot find what the Identity node {} is linked to! {}",
                        nodeDef->name(),
                        CHECK_LOCATION().AsString()));
    }
}

std::vector<OutputOfConstNodeDef>
TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
{
    std::vector<OutputOfConstNodeDef> ret;

    if (nodeDef.op() == "Const")
    {
        // For some reason const node can have "Control Inputs". We ignore them for now.
        return ret;
    }

    ret.reserve(armnn::numeric_cast<size_t>(nodeDef.input_size()));
    for (int j = 0; j < nodeDef.input_size(); ++j)
    {
        OutputId outputId = ParseOutputId(nodeDef.input(j));

        if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
        {
            // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
            continue;
        }

        auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
        if (inputIt == m_NodesByName.end())
        {
            throw ParseException(
                fmt::format("Can't find node '{}', which is listed as an input of '{}' {}",
                            nodeDef.input(j),
                            nodeDef.name(),
                            CHECK_LOCATION().AsString()));
        }
        ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
    }

    return ret;
}

std::vector<OutputOfParsedTfOperation>
TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                            std::size_t expectedNumInputs)
{
    // Fetches the tensorflow nodes connected as inputs and validates the size.
    std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
    const std::size_t numInputs = nodes.size();
    if (numInputs != expectedNumInputs)
    {
        throw ParseException(
            fmt::format("Unexpected number of inputs for node {}. Expected {}, found {} {}",
                        nodeDef.name(),
                        expectedNumInputs,
                        numInputs,
                        CHECK_LOCATION().AsString()));
    }
    // Fetches the corresponding ParsedTfOperation operations
    std::vector<OutputOfParsedTfOperation> result;
    for (auto&& node : nodes)
    {
        auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
        if (it == m_ParsedTfOperations.end())
        {
            throw ParseException(
                fmt::format("Node with name '{}' has not been parsed {}",
                            node.m_IndexedValue->name(),
                            CHECK_LOCATION().AsString()));
        }
        ParsedTfOperation* parsedOp = it->second.get();
        // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
        parsedOp = parsedOp->ResolveIdentityOperations();
        result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
    }
    return result;
}

IConnectableLayer* TfParser::CreateAdditionLayer(
            const tensorflow::NodeDef& nodeDef,
            IOutputSlot* input0Slot,
            IOutputSlot* input1Slot,
            const std::string& layerName)
{
    const TensorInfo& input0Info = input0Slot->GetTensorInfo();
    const TensorInfo& input1Info = input1Slot->GetTensorInfo();

    const unsigned int input0Dim = input0Info.GetNumDimensions();
    const unsigned int input1Dim = input1Info.GetNumDimensions();
    if (input0Dim != input1Dim)
    {
        // broadcasting where input0 and input1 have different number of dimensions
        // is only supported for 1D and 4D tensors pair
        if (input0Dim == 1 && input1Dim == 4)
        {
            input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
        }
        else if (input0Dim == 4 && input1Dim == 1)
        {
            input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
        }
        else
        {
            throw ParseException(
                fmt::format("Unsupported broadcast configuration for {} operation {} {}",
                            layerName,
                            nodeDef.name(),
                            CHECK_LOCATION().AsString()));
        }
    }
    IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());

    input0Slot->Connect(layer->GetInputSlot(0));
    input1Slot->Connect(layer->GetInputSlot(1));

    // Ensure the output tensor has the correct dimensions even if a broadcast has been done
    TensorInfo outputInfo = input0Slot->GetTensorInfo();
    std::vector<unsigned int> outputShape;

    const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
    const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();

    for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
    {
        outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
    }

    outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return layer;
}

IConnectableLayer* TfParser::CreateAdditionLayer(
            const tensorflow::NodeDef& nodeDef,
            IConnectableLayer* layerOne,
            IConnectableLayer* layerTwo,
            unsigned int numberOfAddition,
            unsigned long numberOfLayersToConnect,
            bool isOdd)
{
    IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
    IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
    std::string layerName(nodeDef.name());
    if (isOdd || numberOfLayersToConnect != 2)
    {
        // we are not connecting the final layer
        layerName.append("_addN_").append(std::to_string(numberOfAddition));
    }
    return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
}

IConnectableLayer* TfParser::CreateAdditionLayer(
        const tensorflow::NodeDef& nodeDef,
        const OutputOfParsedTfOperation& opOne,
        const OutputOfParsedTfOperation& opTwo,
        unsigned int numberOfAddition)
{
    IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
    IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
    std::string layerName(nodeDef.name());
    layerName.append("_addN_").append(std::to_string(numberOfAddition));
    return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
}

IConnectableLayer* TfParser::CreateAdditionLayer(
        const tensorflow::NodeDef& nodeDef,
        const OutputOfParsedTfOperation& op,
        IConnectableLayer* layer)
{
    IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
    IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
    return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
}

ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
    if (numberOfInputs < 2)
    {
        // should never happen
        throw ParseException(
            fmt::format("AddN Node with name '{}' has less than two ({}) inputs {}",
                        nodeDef.name(),
                        std::to_string(numberOfInputs),
                        CHECK_LOCATION().AsString()));
    }
    else if (numberOfInputs == 2)
    {
        //this is the same as a simple Add operation
        return AddAdditionLayer(nodeDef, false);
    }
    else
    {
        // build a binary tree of Add layers and return the final Add as the return from the function
        // if we have an odd number of inputs then the final Add will consist of a layer connecting to an
        // OutputOfParsedTfOperation, otherwise it will be two layers being added together
        std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
        unsigned int numberOfAdditions = 0;
        std::vector<IConnectableLayer*> layers;
        // NOTE: at this point we will have a minimum of three inputs
        for (unsigned int i = 0; i < numberOfInputs; ++i)
        {
            // every time i is odd we have two inputs to process.
            bool onSecondItem = i % 2;
            if (onSecondItem)
            {
                ++numberOfAdditions;
                IConnectableLayer* newLayer = CreateAdditionLayer(
                    nodeDef, inputs[ i - 1], inputs[i], numberOfAdditions);
                layers.push_back(newLayer);
            }
        }

        std::vector<IConnectableLayer*> layersToConnect(layers);
        unsigned long numberOfLayersToConnect = layersToConnect.size();
        bool isOdd = numberOfInputs % 2;

        while (numberOfLayersToConnect > 1)
        {
            layers.clear();
            for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
                bool onSecondItem = i % 2;
                if (onSecondItem) {
                    ++numberOfAdditions;
                    IConnectableLayer* newLayer = CreateAdditionLayer(
                        nodeDef,
                        layersToConnect[i - 1],
                        layersToConnect[i],
                        numberOfAdditions,
                        numberOfLayersToConnect,
                        isOdd);
                    layers.push_back(newLayer);
                }
            }
            //OK... need to go again... maybe
            layersToConnect = layers;
            numberOfLayersToConnect = layersToConnect.size();
        }
        IConnectableLayer* finalLayer = layersToConnect[0];
        // if we had an odd number of inputs we need to connect the final layer to the
        // last OutputOfParsedTfOperation in order to create the last Add layer we will
        // be handing back.
        if (isOdd)
        {
            // connect the final layer to the last op
            finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
        }
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
    }
}
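
// Illustrative note (not part of the original parser): for a hypothetical AddN node with N = 5 inputs
// (in0..in4), the first pass creates add_1 = in0 + in1 and add_2 = in2 + in3; the while loop then creates
// add_3 = add_1 + add_2; and because N is odd the remaining input is attached last, so the returned layer
// combines add_3 with in4. Only that final Add keeps the original node name, so downstream connections
// resolve to it.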

ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);

    // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
    // together as FullyConnected.
    if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
        HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
             inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
    {
        IConnectableLayer* layer =
            AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
                                   &nodeDef,nodeDef.name().c_str());
        return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
    }
    else
    {
        // Otherwise it's just a regular addition.
        return AddAdditionLayer(nodeDef);
    }
}

ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    return AddAdditionLayer(nodeDef, true);
}

/// A ParsedTfOperation which forwards to another (used for Identity nodes).
class ParsedIdentityTfOperation : public ParsedTfOperation
{
public:
    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
        : ParsedTfOperation(parser, node)
        , m_Representative(representative)
    {
    }

    virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
    {
        ARMNN_ASSERT(m_Representative);
        return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
    }

    virtual ParsedTfOperation* ResolveIdentityOperations() override
    {
        return m_Representative->ResolveIdentityOperations();
    }

private:
    ParsedTfOperation* m_Representative;
};

ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
    // Any requests for the output slots of this node should be forwarded to the node connected as input.
    return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
}

/// A ParsedTfOperation for a Const node.
/// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
/// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
template <typename T>
class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
{
public:
    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
        const T* tensorData, const TensorInfo& tensorInfo)
        : DeferredSingleLayerParsedTfOperation(parser, node),
        m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
        m_TensorInfo(tensorInfo)
    {
        ARMNN_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
    }

    void CreateLayerDeferred() override
    {
        ARMNN_ASSERT(m_Layer == nullptr);
        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
        m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
    }

    ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
    {
        outputTensorData.resize(m_TensorInfo.GetNumElements());

        memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());

        // Updates the result to point to the user provided storage.
        ConstTensor constTensor(m_TensorInfo, outputTensorData);
        return constTensor;
    }

    const T* GetStorage() const
    {
        return m_Storage.data();
    }

    const TensorInfo& GetTensorInfo() const
    {
        return m_TensorInfo;
    }

private:
    ///< Manages the lifetime of the tensor data.
    std::vector<T> m_Storage;
    ///< Describes the layout of the tensor and points to the data in m_Storage.
    TensorInfo m_TensorInfo;
};

DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
                                 const tensorflow::NodeDef& nodeDef)
{
    switch (tfDataType)
    {
    case tensorflow::DT_FLOAT:
        return DataType::Float32;
        break;
    case tensorflow::DT_INT32:
        return DataType::Signed32;
        break;
    default:
        throw ParseException(
            fmt::format("Unknown DataType {} for node {} {}",
                        tensorflow::DataType_Name(tfDataType),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }
}

struct ParseTfTensorValueList
{
    template<typename DataType>
    static void Parse(
        const tensorflow::TensorProto& tfTensor,
        unsigned int dstElements,
        std::vector<int8_t>& outputData);

    template <typename DataType>
    static void ReadData(const void* srcData, unsigned int numSrcElements,
        std::vector<int8_t>& dstData, unsigned int numDstElements)
    {
        // If there are no entries in the list, perform no action.
        if (numSrcElements == 0)
        {
            return;
        }

        // If no size was provided, use the length of the value list.
        if (numDstElements == 0)
        {
            numDstElements = numSrcElements;
        }

        // Allocates memory.
        dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));

        const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
        DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());

        // Copies the value list entries into the destination.
        std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);

        if (numDstElements > numSrcElements)
        {
            // Uses the last element in the list to fill the remaining entries.
            std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
        }
    }

};
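
// Illustrative note (not part of the original parser): a TensorFlow Const node may store a value list that is
// shorter than the declared shape. Assuming a float value list { 1.0f, 2.0f } and a declared element count of
// four, ReadData<float> copies { 1.0f, 2.0f } and then repeats the last entry, leaving { 1.0f, 2.0f, 2.0f,
// 2.0f } in the destination buffer; with no declared shape the destination simply matches the list length.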

template <>
void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
    unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
        outputData, dstElements);
}

template <>
void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
    unsigned int dstElements, std::vector<int8_t>& outputData)
{
    ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
        outputData, dstElements);
}

template <template<typename> class OperatorType, typename T = int8_t>
struct MakeTfOperation
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
        Args&&... args)
    {
        return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
    }
};

template <>
struct MakeTfOperation<ParsedConstTfOperation>
{
    template<typename DataType, class... Args>
    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
        const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
    {
        return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
            reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
    }
};

template <class FuncType>
struct InvokeParseFunction
{
    template<class ResType, class... Args>
    inline static ResType Result(DataType dataType, Args&&... args)
    {
        if (dataType == DataType::Float32)
        {
            return FuncType::template Parse<float>(std::forward<Args>(args)...);
        }
        else if (dataType == DataType::Signed32)
        {
            return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
        }

        return ResType();
    }

    template<class... Args>
    inline static void Result(DataType dataType, Args&&... args)
    {
        if (dataType == DataType::Float32)
        {
            FuncType::template Parse<float>(std::forward<Args>(args)...);
        }
        else if (dataType == DataType::Signed32)
        {
            FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
        }
    }
};
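
// Illustrative note (not part of the original parser): InvokeParseFunction dispatches a compile-time template
// on a runtime armnn::DataType. For example, a call of the form
//   InvokeParseFunction<ParseTfTensorValueList>::Result<void>(DataType::Float32, tfTensor, numElements, data);
// ends up invoking ParseTfTensorValueList::Parse<float>(...), while DataType::Signed32 selects the int32_t
// instantiation; any other data type yields a default-constructed result (or a no-op for the void overload).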

ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    ARMNN_ASSERT(nodeDef.op() == "Const");

    if (nodeDef.attr().count("value") == 0)
    {
        throw ParseException(
            fmt::format("Value not found for Const node - {} {}",
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
    const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
    const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");

    const auto GetDimensionSize = [](auto& d) { return d.size(); };

    std::vector<unsigned int> dimensionSizes;
    std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
        std::back_inserter(dimensionSizes), GetDimensionSize);

    // Calculates number of elements.
    const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
    unsigned int numElements = 0U;

    if (!dimensionSizes.empty())
    {
        numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
                                      1U, std::multiplies<unsigned int>());
    }

    std::vector<int8_t> tensorData;

    // Get tensor data from the list of values attribute.
    if (tfTensor.tensor_content().empty())
    {
        InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);

        // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
        // tensor of the provided number of elements.
        if (numElements == 0)
        {
            const unsigned int tfNumElements =
                static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
            dimensionSizes.push_back(tfNumElements);
        }
    }
    // Gets tensor data from tensor content attribute.
    else
    {
        tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());

        // Checks if a tensor shape is defined for the tensor content.
        if (numElements == 0)
        {
            throw ParseException(
                fmt::format("No tensor shape found for Const node - {} {}",
                            nodeDef.name(),
                            CHECK_LOCATION().AsString()));
        }
    }

    // Const node requires at least a list of values or a content attribute.
    if (tensorData.empty())
    {
        throw ParseException(
            fmt::format("No tensor data found for Const node - {} {}",
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
                                dimensionSizes.data(),
                                dataType);

    // If we have a list of values, then the length of the list must be
    // less than or equal to the number of elements implied by the shape argument.
    if (tensorData.size() > tensorInfo.GetNumBytes())
    {
        throw ParseException(
            fmt::format("Number of elements ({}) should be less than or equal "
                        "to the number of elements implied by the shape argument ({}) for Const node - {} {}",
                        (tensorData.size() / GetDataTypeSize(dataType)),
                        tensorInfo.GetNumElements(),
                        nodeDef.name(),
                        CHECK_LOCATION().AsString()));
    }

    return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
        dataType, this, nodeDef, tensorData, tensorInfo);
}

template<typename Type>
bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
{
    auto it = m_ParsedTfOperations.find(nodeName);
    if (it == m_ParsedTfOperations.end())
    {
        return false;
    }
    return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
}

template<typename Type>
bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
{
    return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
}

unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
{
    for (unsigned int i = 0; i < inputs.size(); i++)
    {
        if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
        {
            return i;
        }
    }
    throw ParseException(
            fmt::format("ArmNN only supports operators with constant axis. {}",
                        CHECK_LOCATION().AsString()));

}

ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
                                           const tensorflow::GraphDef& graphDef)
{
    IgnoreUnused(graphDef);
    std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
    IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
    TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();

    if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
    {
        throw ParseException(
            fmt::format("ArmNN only supports Convolution layers with constant weights for {}, input {} {}",
                        nodeDef.name(),
                        inputs[1].m_IndexedValue->GetNode().name(),
                        CHECK_LOCATION().AsString()));
    }
    ParsedConstTfOperation<float>* weightNode =
        PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);

    std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
    std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
    std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");

    // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
    if (!dilations.empty())
    {
        for (auto dilation : dilations)
        {
            if (dilation != 1u)
            {
                throw ParseException(
                    fmt::format("ArmNN only supports Convolution layers with dilations [1,1,1,1] for {} {}",
                                nodeDef.name(),
                                CHECK_LOCATION().AsString()));
            }
        }
    }

    Convolution2dDescriptor desc;
    desc.m_BiasEnabled = false;

    CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");

    DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;

    desc.m_DataLayout = dataLayout;

    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
    desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];

    uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
    // Tensorflow weights are [H, W, In, Out].
    // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
    // and [Out, In, H, W] when the data layout is NCHW.
    PermutationVector permutationVector =
        dataLayout == DataLayout::NHWC ?
            std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
            std::initializer_list<unsigned int>{ 2, 3, 1, 0 };  // NCHW: [H, W, In, Out] -> [Out, In, H, W]

    // Swizzle the tensor using the given permutation vector.
    const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
    const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);

    // Swizzles the content of the tensor's permanent storage into a local storage.
    std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
    armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
                        weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));

    // Create a weight tensor with the newly swizzled data.
    ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);

    uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
    uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];

    bool padding = false;
    TensorInfo outputInfo;
    unsigned int outputHeight = 0;
    unsigned int outputWidth = 0;

    CHECK_PADDING_TYPE(nodeDef, paddingString);

    if (paddingString == "SAME")
    {
        padding = true;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
                                                 static_cast<float>(desc.m_StrideX)));
    }
    else if (paddingString == "VALID")
    {
        padding = false;

        outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
                                                  static_cast<float>(desc.m_StrideY)));
        outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
                                                 static_cast<float>(desc.m_StrideX)));
    }

    switch (dataLayout)
    {
    case DataLayout::NHWC:
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  outputHeight,
                                  outputWidth,
                                  weightTensor.GetShape()[0] },
                                DataType::Float32);
        break;
    case DataLayout::NCHW:
    default:
        outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
                                  weightTensor.GetShape()[0],
                                  outputHeight,
                                  outputWidth },
                                DataType::Float32);
        break;
    }

    CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
    CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);

    IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
                                                                weightTensor,
                                                                EmptyOptional(),
                                                                nodeDef.name().c_str());
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    inputSlot.Connect(layer->GetInputSlot(0));

    return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
}
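
// Illustrative note (not part of the original parser): given a hypothetical TensorFlow Conv2D weight tensor of
// shape [H, W, In, Out] = [3, 3, 16, 32], the permutation { 1, 2, 3, 0 } used for NHWC produces an ArmNN weight
// shape of [Out, H, W, In] = [32, 3, 3, 16], while { 2, 3, 1, 0 } for NCHW produces [Out, In, H, W] =
// [32, 16, 3, 3]; armnnUtils::Permute moves the float data into the matching element order.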
1299
1300ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
telsoa01c577f2c2018-08-31 09:22:23 +01001301 const tensorflow::GraphDef& graphDef)
surmeh01bceff2f2018-03-29 16:29:27 +01001302{
Jan Eilers8eb25602020-03-09 12:13:48 +00001303 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001304 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1305 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1306 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1307
1308 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1309 {
telsoa01c577f2c2018-08-31 09:22:23 +01001310 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001311 fmt::format("ArmNN only supports Depthwise Convolution layer with constant weights. "
1312 "Non const input found {} for node {} {}",
1313 inputs[1].m_IndexedValue->GetNode().name(),
1314 nodeDef.name(),
1315 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001316 }
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001317
surmeh01bceff2f2018-03-29 16:29:27 +01001318 ParsedConstTfOperation<float>* weightNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001319 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001320
surmeh01bceff2f2018-03-29 16:29:27 +01001321 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1322 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1323 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1324
1325 DepthwiseConvolution2dDescriptor desc;
1326 desc.m_BiasEnabled = false;
1327
telsoa01c577f2c2018-08-31 09:22:23 +01001328 CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");
1329
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001330 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
surmeh01bceff2f2018-03-29 16:29:27 +01001331
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001332 desc.m_DataLayout = dataLayout;
surmeh01bceff2f2018-03-29 16:29:27 +01001333
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001334 DataLayoutIndexed dataLayoutIndexed(dataLayout);
surmeh01bceff2f2018-03-29 16:29:27 +01001335
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001336 desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
1337 desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01001338
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001339 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1340 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
1341
1342 // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
Matteo Martincigh747ef822018-12-18 09:26:39 +00001343    // TensorFlow weights come in the format [H, W, I, M].
1344 // ArmNN weights have to be [M, I, H, W].
1345 PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001346
1347 // Swizzle the tensor using the given permutation vector.
1348 const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1349 const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);
1350
1351 // Swizzles the content of the tensor's permanent storage into a local storage.
1352 std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
1353 armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
Matteo Martincighd5b9e642019-01-04 18:01:21 +00001354 weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001355
1356 // Create a weight tensor with the newly swizzled data.
1357 ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
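    // Illustrative example (shapes assumed): a TensorFlow depthwise filter of shape [H, W, I, M] = [3, 3, 16, 1]
    // is permuted with { 2, 3, 1, 0 } into an ArmNN filter of shape [M, I, H, W] = [1, 16, 3, 3].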
1358
Matteo Martincigh747ef822018-12-18 09:26:39 +00001359 uint32_t weightHeight = weightTensor.GetShape()[2];
1360 uint32_t weightWidth = weightTensor.GetShape()[3];
surmeh01bceff2f2018-03-29 16:29:27 +01001361
1362 bool padding = false;
1363 TensorInfo outputInfo;
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001364 unsigned int outputHeight = 0;
1365 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01001366
1367 CHECK_PADDING_TYPE(nodeDef, paddingString);
1368
surmeh01bceff2f2018-03-29 16:29:27 +01001369 if (paddingString == "SAME")
1370 {
1371 padding = true;
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001372
1373 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
1374 static_cast<float>(desc.m_StrideY)));
1375 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
1376 static_cast<float>(desc.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01001377 }
1378 else if (paddingString == "VALID")
1379 {
1380 padding = false;
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001381
1382 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight - weightHeight + 1) /
1383 static_cast<float>(desc.m_StrideY)));
1384 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth - weightWidth + 1) /
1385 static_cast<float>(desc.m_StrideX)));
1386 }
1387
1388 switch (dataLayout)
1389 {
1390 case DataLayout::NHWC:
1391 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1392 outputHeight,
1393 outputWidth,
Matteo Martincigh747ef822018-12-18 09:26:39 +00001394 weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001395 DataType::Float32);
1396 break;
1397 case DataLayout::NCHW:
1398 default:
1399 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1400 weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
1401 outputHeight,
1402 outputWidth },
1403 DataType::Float32);
1404 break;
surmeh01bceff2f2018-03-29 16:29:27 +01001405 }
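    // Note on the channel count used above: after the swizzle the filter shape is [M, I, H, W], so
    // GetShape()[0] * GetShape()[1] = M * I, i.e. the channel multiplier times the number of input
    // channels, which is the number of output channels a depthwise convolution produces.
    // For example (assumed shapes), I = 16 input channels with multiplier M = 2 give 32 output channels.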
surmeh01bceff2f2018-03-29 16:29:27 +01001406
1407 CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
1408 CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
1409
Matteo Martincighfc598e12019-05-14 10:36:13 +01001410 IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
1411 weightTensor,
1412 EmptyOptional(),
1413 nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01001414 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
Ferran Balaguer6a669d72018-12-11 10:29:05 +00001415 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001416
1417 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1418}
1419
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001420TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef,
1421 TensorInfo inputTensorInfo,
1422 std::int32_t expandDim)
Conor Kennedyc2130a02018-12-05 11:05:54 +00001423{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001424 ARMNN_ASSERT(nodeDef.op() == "ExpandDims");
Conor Kennedyc2130a02018-12-05 11:05:54 +00001425
1426 if (inputTensorInfo.GetNumDimensions() > 4) {
1427 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001428 fmt::format("Unsupported number of dimensions: {} for input shape for ExpandDims {} {}",
1429 inputTensorInfo.GetNumDimensions(),
1430 nodeDef.name(),
1431 CHECK_LOCATION().AsString()));
Conor Kennedyc2130a02018-12-05 11:05:54 +00001432 }
1433
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001434 std::int32_t inputDimSize = armnn::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
Conor Kennedyc2130a02018-12-05 11:05:54 +00001435 std::vector<uint32_t> outputDims;
1436
1437 // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
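    // Illustrative example (values assumed): for an input of shape [2, 3] (inputDimSize = 2) the valid
    // range for expandDim is -3..2; expandDim = 0 produces [1, 2, 3] and expandDim = -1 produces [2, 3, 1].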
1438 if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1439 {
1440 // add current input shape to outputDims
1441 for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1442 auto currentDimension = inputTensorInfo.GetShape()[i];
1443 outputDims.push_back(currentDimension);
1444 }
1445
1446 // insert a dimension of 1 at index 'expandDim' of inputs shape
1447 if (expandDim >= 0)
1448 {
1449 auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1450 outputDims.insert(getPosition, 1);
1451 }
1452
1453 // if negative number for 'expandDim' then count backwards from the last element
1454 // and insert 1 dimension at index 'expandDim'
1455 if (expandDim < 0)
1456 {
Matthew Sloyan589e3e82020-09-11 16:17:48 +01001457 int outputDimSize = armnn::numeric_cast<int>(outputDims.size() + 1);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001458 auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1459 outputDims.insert(getPosition, 1);
1460 }
1461 }
1462 else
1463 {
1464 throw InvalidArgumentException(
James Ward58dec6b2020-09-11 17:32:44 +01001465 fmt::format("Cannot expand dimension {} in input tensor with {} dimension {}",
1466 expandDim,
1467 inputDimSize,
1468 CHECK_LOCATION().AsString()));
Conor Kennedyc2130a02018-12-05 11:05:54 +00001469 }
1470
1471 if (outputDims.size() > 4)
1472 {
1473 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001474 fmt::format("Unsupported number of dimensions: {} for output shape for ExpandDims {} {}",
1475 outputDims.size(),
1476 nodeDef.name(),
1477 CHECK_LOCATION().AsString()));
Conor Kennedyc2130a02018-12-05 11:05:54 +00001478 }
1479
1480 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1481 outputDims.data());
1482
1483 TensorInfo outTensorInfo = inputTensorInfo;
1484 outTensorInfo.SetShape(outShape);
1485
1486 return outTensorInfo;
1487}
1488
1489ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1490{
Jan Eilers8eb25602020-03-09 12:13:48 +00001491 IgnoreUnused(graphDef);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001492
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001493    // The number of inputs can either
1494    // be 1 - the axis parameter is then passed as the "Tdim" attribute of the operation,
1495    // or 2 - the axis parameter is then passed as a second, constant input.
1496 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
1497 const std::size_t numInputs = nodes.size();
1498 std::vector<OutputOfParsedTfOperation> inputs;
1499 std::int32_t expandDim; // axis or dim parameter. Describes which dimension to expand.
1500 if (numInputs == 1)
1501 {
1502 inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1503 expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1504 }
1505 else
1506 {
1507 inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1508
1509 // make sure data type is int32
1510 IOutputSlot& prevLayerOutputSlot = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1511 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1512
1513 if (inputTensorInfo.GetDataType()!=armnn::DataType::Signed32)
1514 {
1515 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001516 fmt::format("The axis parameter of ExpandDims operation given as second input is not of type int32."
1517 " Input {0} Node {1} {2}",
1518 inputs[1].m_IndexedValue->GetNode().name(),
1519 nodeDef.name(),
1520 CHECK_LOCATION().AsString()));
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001521 }
1522
1523 // ensure the second input is a constant value
1524 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1525 {
1526 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001527 fmt::format("ArmNN only supports ExpandDims layers with constant axis/dim parameter. "
1528 "Input {0} Node {1} {2}",
1529 inputs[1].m_IndexedValue->GetNode().name(),
1530 nodeDef.name(),
1531 CHECK_LOCATION().AsString()));
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001532 }
1533
1534 // make sure the second input is scalar or contains only a single value
1535    // (we don't support ExpandDims for multiple axes but we don't care what shape the
1536 // given tensor has as long as there is only a single value in it
1537 // e.g. a tensor like this [[[1]]] is completely fine)
1538 if (inputTensorInfo.GetNumElements() != 1)
1539 {
1540 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001541 fmt::format("The axis parameter of ExpandDims operation given as second input is not "
1542 "allowed to hold more than one value. "
1543 "Input {0} Node {1} {2}",
1544 inputs[1].m_IndexedValue->GetNode().name(),
1545 nodeDef.name(),
1546 CHECK_LOCATION().AsString()));
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001547 }
1548
1549 ParsedConstTfOperation<int32_t>* expandDimsNode =
1550 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1551
1552 memcpy(&expandDim, expandDimsNode->GetStorage(), sizeof(expandDim));
1553 }
1554
1555    // The first input is the tensor that should be expanded by another dimension
Conor Kennedyc2130a02018-12-05 11:05:54 +00001556 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1557 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1558
1559 TensorInfo outputInfo;
Jan Eilers1f3b49b2020-09-08 08:57:40 +01001560 outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo, expandDim);
Conor Kennedyc2130a02018-12-05 11:05:54 +00001561
1562 ReshapeDescriptor reshapeDesc;
1563 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1564 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1565 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1566 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1567
1568 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1569}
1570
surmeh01bceff2f2018-03-29 16:29:27 +01001571ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1572 const tensorflow::GraphDef& graphDef)
1573{
Jan Eilers8eb25602020-03-09 12:13:48 +00001574 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01001575 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1576
1577 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1578 {
telsoa01c577f2c2018-08-31 09:22:23 +01001579 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001580 fmt::format("ArmNN only supports FusedBatchNormalization layers with constant scale. "
1581 "Input {}. Node {} {}",
1582 inputs[1].m_IndexedValue->GetNode().name(),
1583 nodeDef.name(),
1584 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001585 }
1586 ParsedConstTfOperation<float>* scaleNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001587 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001588
1589 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1590 {
telsoa01c577f2c2018-08-31 09:22:23 +01001591 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001592 fmt::format("ArmNN only supports FusedBatchNormalization layers with constant offset. "
1593 "Input {}. Node {} {}",
1594 inputs[2].m_IndexedValue->GetNode().name(),
1595 nodeDef.name(),
1596 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001597 }
1598 ParsedConstTfOperation<float>* offsetNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001599 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001600
1601 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1602 {
telsoa01c577f2c2018-08-31 09:22:23 +01001603 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001604 fmt::format("ArmNN only supports FusedBatchNormalization layers with constant mean. "
1605 "Input {}. Node {} {}",
1606 inputs[3].m_IndexedValue->GetNode().name(),
1607 nodeDef.name(),
1608 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001609 }
1610 ParsedConstTfOperation<float>* meanNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001611 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001612
1613 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1614 {
telsoa01c577f2c2018-08-31 09:22:23 +01001615 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001616 fmt::format("ArmNN only supports FusedBatchNormalization layers with constant variance. "
1617 "Input {}. Node {} {}",
1618 inputs[4].m_IndexedValue->GetNode().name(),
1619 nodeDef.name(),
1620 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01001621 }
1622 ParsedConstTfOperation<float>* varianceNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01001623 PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01001624
Aron Virginas-Tar2e259272019-11-27 13:29:51 +00001625 const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef, "data_format", "NHWC");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001626 CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1627
telsoa01c577f2c2018-08-31 09:22:23 +01001628 // The descriptor only has the epsilon attribute.
surmeh01bceff2f2018-03-29 16:29:27 +01001629 BatchNormalizationDescriptor desc;
1630 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
Matteo Martincigh075c7502018-12-05 13:10:45 +00001631 desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
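    // For reference, the fused batch norm handled here follows the usual inference-time formula
    // (a sketch, not a quote from the TensorFlow docs):
    //   output = scale * (input - mean) / sqrt(variance + epsilon) + offset
    // with scale/offset/mean/variance taken from the constant inputs gathered below.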
surmeh01bceff2f2018-03-29 16:29:27 +01001632
telsoa01c577f2c2018-08-31 09:22:23 +01001633 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1634 // locally until the layer is added.
surmeh01bceff2f2018-03-29 16:29:27 +01001635 std::vector<float> scaleTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001636 ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001637
1638 std::vector<float> offsetTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001639 ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001640
1641 std::vector<float> meanTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001642 ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001643
1644 std::vector<float> varianceTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001645 ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01001646
1647 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1648 meanTensor,
1649 varianceTensor,
1650 offsetTensor,
1651 scaleTensor,
1652 nodeDef.name().c_str());
1653
1654 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1655
Matteo Martincigh075c7502018-12-05 13:10:45 +00001656 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1657 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01001658
1659 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1660}
1661
telsoa01c577f2c2018-08-31 09:22:23 +01001662bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1663 size_t alphaLayerIndex,
1664 const OutputOfParsedTfOperation& otherOp,
1665 armnn::IOutputSlot** outputOfLeakyRelu,
1666 armnn::ActivationDescriptor & desc)
1667{
1668 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1669
1670 // Verifying all these assumptions hold:
1671 //
1672 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1673 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1674    // 3, the other input of the "Mul" node (the one not selected by alphaLayerIndex) resolves to the same layer as otherNodeDef
1675 //
1676
1677 if (mulNodeDef.op() == "Mul")
1678 {
1679 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1680 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1681
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001682 ARMNN_ASSERT(inputs.size() == 2);
1683 ARMNN_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1684 ARMNN_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1685 ARMNN_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
telsoa01c577f2c2018-08-31 09:22:23 +01001686
1687 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1688 {
1689 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1690 {
1691 ParsedConstTfOperation<float>* alpha =
Jan Eilersbb446e52020-04-02 13:56:54 +01001692 PolymorphicDowncast<ParsedConstTfOperation<float> *>(
telsoa01c577f2c2018-08-31 09:22:23 +01001693 inputs[alphaLayerIndex].m_IndexedValue);
1694
1695 std::vector<float> const_data;
Matteo Martincigh482ca852018-12-12 09:20:55 +00001696 ConstTensor const_tensor = alpha->GetConstTensor(const_data);
telsoa01c577f2c2018-08-31 09:22:23 +01001697
1698 if (const_data.size() == 1)
1699 {
1700 desc.m_Function = ActivationFunction::LeakyReLu;
1701 desc.m_A = const_data[0];
1702
1703 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1704 return true;
1705 }
1706 }
1707 }
1708 }
1709 return false;
1710}
1711
telsoa01c577f2c2018-08-31 09:22:23 +01001712ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1713 const tensorflow::GraphDef& graphDef)
1714{
Jan Eilers8eb25602020-03-09 12:13:48 +00001715 IgnoreUnused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001716 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
Sadik Armagan975c09a2018-12-04 10:02:08 +00001717 if (inputs.size() != 2)
1718 {
1719 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001720 fmt::format("Maximum expects two inputs!. Got {} for Node {} {}",
1721 inputs.size(),
1722 nodeDef.name(),
1723 CHECK_LOCATION().AsString()));
Sadik Armagan975c09a2018-12-04 10:02:08 +00001724 }
1725
telsoa01c577f2c2018-08-31 09:22:23 +01001726 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1727 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1728 IOutputSlot* outputOfLeakyRelu = nullptr;
1729
1730 ActivationDescriptor desc;
1731
Sadik Armagan975c09a2018-12-04 10:02:08 +00001732 // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1733 // i.e. one of the four possible scenarios:
1734 // 1, max(mul(a, x), x)
1735 // 2, max(mul(x, a), x)
1736 // 3, max(x, mul(a, x))
1737 // 4, max(x, mul(x, a))
1738 // These are handled by an activation layer.
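    // As a reminder of why this works (illustrative): for a constant 0 < a < 1,
    //   LeakyReLU(x) = max(a * x, x)
    // i.e. x for x >= 0 and a * x for x < 0, which matches ArmNN's LeakyReLu activation with m_A = a.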
telsoa01c577f2c2018-08-31 09:22:23 +01001739
1740 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1741 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1742 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1743 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1744 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001745 ARMNN_ASSERT(outputOfLeakyRelu != nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01001746
1747 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1748 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1749 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1750 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1751 }
1752 else
1753 {
Sadik Armagan975c09a2018-12-04 10:02:08 +00001754 // Anything else is just a maximum layer.
1755
1756 return AddMaximumLayer(nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01001757 }
1758}
1759
jimfly0184c70e62018-12-19 13:14:46 +00001760std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
1761 const tensorflow::NodeDef& nodeDef, const std::string& layerName)
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001762{
1763 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1764
1765 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1766 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1767 const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1768 const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1769
1770 if (input0Dim != input1Dim)
1771 {
1772 // broadcasting where input0 and input1 have different number of dimensions
1773 // is only supported for 1D and 4D tensors pair
1774 if (input0Dim == 1 && input1Dim == 4)
1775 {
1776 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1777 }
1778 else if (input0Dim == 4 && input1Dim == 1)
1779 {
1780 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1781 }
1782 else
1783 {
1784 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001785 fmt::format("Unsupported broadcast configuration for {} operation {} {}",
1786 layerName,
1787 nodeDef.name(),
1788 CHECK_LOCATION().AsString()));
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001789 }
1790 }
jimfly0184c70e62018-12-19 13:14:46 +00001791 return {input0Slot, input1Slot};
1792}
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001793
kevmay012b4d88e2019-01-24 14:05:09 +00001794ParsedTfOperationPtr TfParser::ProcessComparisonLayer(
1795 IOutputSlot* input0Slot,
1796 IOutputSlot* input1Slot,
1797 IConnectableLayer* const layer,
1798 const tensorflow::NodeDef& nodeDef)
1799{
1800 input0Slot->Connect(layer->GetInputSlot(0));
1801 input1Slot->Connect(layer->GetInputSlot(1));
1802
1803 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1804 outputInfo.SetDataType(DataType::Boolean);
1805 std::vector<unsigned int> outputShape;
1806
1807 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1808 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1809
1810 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1811 {
1812 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1813 }
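    // Example of the shape computation above (shapes assumed): inputs of shape [1, 2, 2, 3] and
    // [1, 1, 1, 3] give a comparison output of shape [1, 2, 2, 3] with Boolean elements.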
1814
1815 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1816 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1817
1818 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1819}
1820
jimfly0184c70e62018-12-19 13:14:46 +00001821ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
1822 IOutputSlot* input0Slot,
1823 IOutputSlot* input1Slot,
1824 IConnectableLayer* const layer,
1825 const tensorflow::NodeDef& nodeDef)
1826{
Nattapat Chaimanowong24df8222018-12-04 13:47:02 +00001827 input0Slot->Connect(layer->GetInputSlot(0));
1828 input1Slot->Connect(layer->GetInputSlot(1));
1829
1830 TensorInfo outputInfo = input0Slot->GetTensorInfo();
1831 std::vector<unsigned int> outputShape;
1832
1833 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1834 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1835
1836 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1837 {
1838 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1839 }
1840
1841 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1842 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1843
1844 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1845}
1846
FrancisMurtagh94412af2019-01-24 10:53:39 +00001847ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
1848 const tensorflow::GraphDef& graphDef)
1849{
Jan Eilers8eb25602020-03-09 12:13:48 +00001850 IgnoreUnused(graphDef);
FrancisMurtagh94412af2019-01-24 10:53:39 +00001851 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1852 IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1853 IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
Teresa Charlin52664732020-06-29 16:27:03 +01001854 GatherDescriptor descriptor;
1855 descriptor.m_Axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
FrancisMurtagh94412af2019-01-24 10:53:39 +00001856
1857 // Infer shape of output tensor
1858 unsigned int paramsDim = params.GetTensorInfo().GetNumDimensions();
1859 unsigned int indicesDim = indices.GetTensorInfo().GetNumDimensions();
1860 unsigned int outputDim = paramsDim - 1 + indicesDim;
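    // Illustrative example (shapes assumed, gathering along axis 0 since the inference below drops the
    // first params dimension): params of shape [5, 4] and indices of shape [3] give
    // outputDim = 2 - 1 + 1 = 2 and an inferred output shape of [3, 4].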
1861
1862 std::vector<unsigned int> dimSizes;
1863
1864 for (unsigned int i = 0; i < indicesDim; ++i)
1865 {
1866 dimSizes.push_back(indices.GetTensorInfo().GetShape()[i]);
1867 }
1868 for (unsigned int i = 1; i < paramsDim; ++i)
1869 {
1870 dimSizes.push_back(params.GetTensorInfo().GetShape()[i]);
1871 }
1872
1873 const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
1874
1875 const TensorInfo inferredOutputInfo(inferredShape, params.GetTensorInfo().GetDataType());
1876
Teresa Charlin52664732020-06-29 16:27:03 +01001877 IConnectableLayer* const layer = m_Network->AddGatherLayer(descriptor, nodeDef.name().c_str());
FrancisMurtagh94412af2019-01-24 10:53:39 +00001878 layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);
1879
1880 params.Connect(layer->GetInputSlot(0));
1881 indices.Connect(layer->GetInputSlot(1));
1882
1883 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1884}
1885
jimfly01a06bf312018-12-18 16:24:51 +00001886ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
1887 const tensorflow::GraphDef& graphDef)
1888{
Jan Eilers8eb25602020-03-09 12:13:48 +00001889 IgnoreUnused(graphDef);
jimfly01a06bf312018-12-18 16:24:51 +00001890 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1891 IOutputSlot* input0Slot = inputLayers.first;
1892 IOutputSlot* input1Slot = inputLayers.second;
1893
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001894 ComparisonDescriptor descriptor(ComparisonOperation::Greater);
1895 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly01a06bf312018-12-18 16:24:51 +00001896
kevmay012b4d88e2019-01-24 14:05:09 +00001897 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly01a06bf312018-12-18 16:24:51 +00001898}
1899
jimfly0184c70e62018-12-19 13:14:46 +00001900ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
1901 const tensorflow::GraphDef& graphDef)
1902{
Jan Eilers8eb25602020-03-09 12:13:48 +00001903 IgnoreUnused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001904 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1905 IOutputSlot* input0Slot = inputLayers.first;
1906 IOutputSlot* input1Slot = inputLayers.second;
1907
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01001908 ComparisonDescriptor descriptor(ComparisonOperation::Equal);
1909 IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
jimfly0184c70e62018-12-19 13:14:46 +00001910
kevmay012b4d88e2019-01-24 14:05:09 +00001911 return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
jimfly0184c70e62018-12-19 13:14:46 +00001912}
1913
1914ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1915 const tensorflow::GraphDef& graphDef)
1916{
Jan Eilers8eb25602020-03-09 12:13:48 +00001917 IgnoreUnused(graphDef);
jimfly0184c70e62018-12-19 13:14:46 +00001918 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1919 IOutputSlot* input0Slot = inputLayers.first;
1920 IOutputSlot* input1Slot = inputLayers.second;
1921
1922 IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1923
1924 return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1925}
1926
jimfly0123be07e2018-12-04 17:47:22 +00001927ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1928{
Jan Eilers8eb25602020-03-09 12:13:48 +00001929 IgnoreUnused(graphDef);
jimfly0123be07e2018-12-04 17:47:22 +00001930 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1931
1932 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1933 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1934
1935 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
1936 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
1937
1938 if (input0Info.GetNumDimensions() == 1)
1939 {
1940 const bool isNHWC = true;
1941 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1942 }
1943
1944 if (input1Info.GetNumDimensions() == 1)
1945 {
1946 const bool isNHWC = true;
1947 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1948 }
1949
1950 IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());
1951
1952 input0Slot->Connect(layer->GetInputSlot(0));
1953 input1Slot->Connect(layer->GetInputSlot(1));
1954
1955 if (input0Info.GetNumDimensions() == 1)
1956 {
1957 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
1958 }
1959 else
1960 {
1961 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
1962 }
1963
1964 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1965}
1966
Sadik Armagan48d70932020-02-18 15:18:27 +00001967ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1968{
Jan Eilers8eb25602020-03-09 12:13:48 +00001969 IgnoreUnused(graphDef);
Sadik Armagan48d70932020-02-18 15:18:27 +00001970 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
1971
1972 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
1973 if (numInputs < 1)
1974 {
1975 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001976 fmt::format("Pack/Stack expects at least one input. Got {} for Node {} {}",
1977 numInputs,
1978 nodeDef.name(),
1979 CHECK_LOCATION().AsString()));
Sadik Armagan48d70932020-02-18 15:18:27 +00001980 }
1981
1982 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
1983 // Use the tensor shape of the first input as the "correct" input shape in the descriptor
1984 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1985 const TensorInfo& inputTensorInfo = input0Slot->GetTensorInfo();
1986 auto numDimensions = inputTensorInfo.GetShape().GetNumDimensions();
1987
1988 // validate axis
1989 int32_t axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
1990 const int sNumDimensions = (static_cast<int>(numDimensions) + 1);
1991 if (!(axis < sNumDimensions && axis >= -sNumDimensions))
1992 {
1993 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01001994 fmt::format("Axis index is not in range. Got {} for Node {} {}",
1995 axis,
1996 nodeDef.name(),
1997 CHECK_LOCATION().AsString()));
Sadik Armagan48d70932020-02-18 15:18:27 +00001998 }
1999
2000 if (axis < 0)
2001 {
2002 axis = static_cast<int32_t>(numDimensions) + axis + 1;
2003 }
2004
2005 StackDescriptor stackDescriptor;
2006 stackDescriptor.m_Axis = static_cast<uint32_t>(axis);
2007 stackDescriptor.m_NumInputs = static_cast<uint32_t>(numInputs);
2008 stackDescriptor.m_InputShape = inputTensorInfo.GetShape();
2009
2010 const unsigned int supportedNumDims = 4;
2011 for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2012 {
2013 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2014 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2015
2016 // Double check dimensions of the tensors
2017 if (inputTensorInfo.GetNumDimensions() >= supportedNumDims)
2018 {
2019 throw armnn::ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002020                fmt::format("Unsupported number of dimensions: {} for input tensors of the "
2021                            "Pack/Stack op. The number of dimensions should be less than {} {}",
2022 inputTensorInfo.GetNumDimensions(),
2023 supportedNumDims,
2024 CHECK_LOCATION().AsString()));
Sadik Armagan48d70932020-02-18 15:18:27 +00002025 }
2026 }
2027
2028 std::vector<unsigned int> outputDimensions;
2029 for (unsigned int i = 0; i < stackDescriptor.m_InputShape.GetNumDimensions(); ++i)
2030 {
2031 outputDimensions.push_back(stackDescriptor.m_InputShape[i]);
2032 }
2033 outputDimensions.insert(outputDimensions.begin() + axis, numInputs);
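    // Illustrative example (values assumed): stacking numInputs = 3 tensors of shape [4, 5] along
    // axis 0 gives outputDimensions = [3, 4, 5]; an axis of -1 is normalised to 2 above and gives [4, 5, 3].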
2034
2035 // add Stack Layer
2036 IConnectableLayer* const layer = m_Network->AddStackLayer(stackDescriptor, nodeDef.name().c_str());
2037
2038 for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2039 {
2040 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2041 inputSlot.Connect(layer->GetInputSlot(viewIndex));
2042 }
2043
2044 layer->GetOutputSlot(0).SetTensorInfo(
2045 armnn::TensorInfo(static_cast<uint32_t>(outputDimensions.size()),
2046 outputDimensions.data(),
2047 inputTensorInfo.GetDataType()));
2048
2049 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2050}
2051
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002052ParsedTfOperationPtr TfParser::ParseTranspose(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2053{
Jan Eilers8eb25602020-03-09 12:13:48 +00002054 IgnoreUnused(graphDef);
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002055
2056 auto inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2057 const auto inputCount = inputs.size();
2058
2059 if (inputCount != 2)
2060 {
2061 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002062            fmt::format("The number of given inputs is {}. It should be two for the Transpose op. "
2063                        "Node {} {}",
2064 inputCount,
2065 nodeDef.name(),
2066 CHECK_LOCATION().AsString()));
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002067 }
2068
2069 auto* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2070
2071 const auto constInput = inputs[GetConstInputIndex(inputs)];
2072 auto* permuteVectorInput =
Jan Eilersbb446e52020-04-02 13:56:54 +01002073 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(constInput.m_IndexedValue);
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002074 const auto& permuteVectorInfo = permuteVectorInput->GetTensorInfo();
2075
2076 std::vector<int32_t> permuteVectorData;
2077 permuteVectorInput->GetConstTensor(permuteVectorData);
2078
Mike Kelly08759e22020-03-02 11:41:31 +00002079 std::vector<unsigned int> armnnPermuteVectorData(permuteVectorData.begin(), permuteVectorData.end());
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002080
2081 const auto permutationVector = PermutationVector(armnnPermuteVectorData.data(), permuteVectorInfo.GetNumElements());
Mike Kelly08759e22020-03-02 11:41:31 +00002082 const auto desc = TransposeDescriptor(permutationVector);
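    // Illustrative example (values assumed): a permutation input of [0, 2, 3, 1] applied to an NCHW
    // tensor of shape [1, 3, 224, 224] yields an output shape of [1, 224, 224, 3].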
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002083
Mike Kelly08759e22020-03-02 11:41:31 +00002084 auto* layer = m_Network->AddTransposeLayer(desc, nodeDef.name().c_str());
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002085 ARMNN_ASSERT(layer);
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002086
2087 input0Slot->Connect(layer->GetInputSlot(0));
2088
2089 const auto& input0Info = input0Slot->GetTensorInfo();
2090 armnn::TensorInfo outputInfo {input0Info};
Mike Kelly08759e22020-03-02 11:41:31 +00002091 outputInfo.SetShape(armnnUtils::TransposeTensorShape(input0Info.GetShape(), desc.m_DimMappings));
Sang-Hoon Parkdd3f71b2020-02-18 11:27:35 +00002092 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2093
2094 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2095}
2096
jimfly01f6ba7472018-12-04 10:09:52 +00002097unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
2098 const TensorInfo& inputTensorInfo,
2099 const std::string& nodeName)
2100{
2101 unsigned int rank = paddingTensor.GetShape()[0];
2102 unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
2103 if (rank != expectedRank)
2104 {
2105 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002106 fmt::format("Expected the padding tensor to be of rank {} not {} on Node {} {}.",
2107 expectedRank,
2108 rank,
2109 nodeName,
2110 CHECK_LOCATION().AsString()));
jimfly01f6ba7472018-12-04 10:09:52 +00002111 }
2112 unsigned int second = paddingTensor.GetShape()[1];
2113 if (second != 2)
2114 {
2115 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002116 fmt::format("Expected the padding tensor to be of dimensions "
2117                        "[{0}, 2] not [{0}, {1}] on Node {2} {3}.",
2118 rank,
2119 second,
2120 nodeName,
2121 CHECK_LOCATION().AsString()));
jimfly01f6ba7472018-12-04 10:09:52 +00002122 }
2123 return rank;
2124}
2125
2126TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
2127 const std::vector<std::pair<unsigned int, unsigned int>>& padList)
2128{
2129 unsigned int numDims = inputTensorInfo.GetNumDimensions();
2130 std::vector<unsigned int> outDims;
2131 for (unsigned int i = 0; i < numDims; ++i)
2132 {
2133 unsigned int dimSize = inputTensorInfo.GetShape()[i];
2134 const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2135 dimSize += dimPadding.first;
2136 dimSize += dimPadding.second;
2137 outDims.push_back(dimSize);
2138 }
2139 TensorInfo paddedTensorInfo = inputTensorInfo;
2140 unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
2141 paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
2142 return paddedTensorInfo;
2143}
2144
2145ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
2146 const tensorflow::GraphDef& graphDef)
2147{
Jan Eilers8eb25602020-03-09 12:13:48 +00002148 IgnoreUnused(graphDef);
jimfly01f6ba7472018-12-04 10:09:52 +00002149 // input consists of:
2150 // input[0] the tensor which will be padded
2151 // input[1] the tensor holding the padding values
2152 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2153 IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2154 TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
2155 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2156 {
2157 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002158 fmt::format("ArmNN only supports Pad with constant padding. "
2159 "Input {}. Node {} {}",
2160 inputs[1].m_IndexedValue->GetNode().name(),
2161 nodeDef.name(),
2162 CHECK_LOCATION().AsString()));
jimfly01f6ba7472018-12-04 10:09:52 +00002163
2164 }
2165 ParsedConstTfOperation<int32_t>* paddingTensorOp =
Jan Eilersbb446e52020-04-02 13:56:54 +01002166 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
jimfly01f6ba7472018-12-04 10:09:52 +00002167
2168 std::vector<int32_t> paddingTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002169 ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
jimfly01f6ba7472018-12-04 10:09:52 +00002170 // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
2171 // and should match the rank of the input tensor that is being padded.
2172 // For each dimension D of input, paddings[D, 0] indicates how many values to add
2173 // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
2174 // many values to add after the contents of tensor in that dimension
2175 // This needs to be translated into a padList for ACL
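    // Illustrative example (values assumed): a paddings tensor of [[0, 0], [1, 1], [2, 2], [0, 0]]
    // applied to an input of shape [1, 4, 4, 3] becomes padList {{0,0}, {1,1}, {2,2}, {0,0}} and
    // produces an output of shape [1, 6, 8, 3].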
2176 std::vector<std::pair<unsigned int, unsigned int>> padList;
2177 unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2178 for (unsigned int i = 0; i < rank; ++i)
2179 {
2180 std::pair<unsigned int, unsigned int> paddingForDim;
2181 for (unsigned int j = 0; j < 2; j++)
2182 {
2183 unsigned int index = (i * 2) + j;
2184 int paddingAmount = paddingTensorData[index];
2185 // make sure we can cast to an unsigned value
2186 if (paddingAmount < 0)
2187 {
2188 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002189 fmt::format("Negative amount {} specified at [{}, {}] of padding tensor on Node {} {}.",
2190 paddingAmount,
2191 i,
2192 j,
2193 nodeDef.name(),
2194 CHECK_LOCATION().AsString()));
jimfly01f6ba7472018-12-04 10:09:52 +00002195 }
2196 if (j == 0)
2197 {
2198 paddingForDim.first = static_cast<unsigned int>(paddingAmount);
2199 }
2200 else
2201 {
2202 paddingForDim.second = static_cast<unsigned int>(paddingAmount);
2203 }
2204 }
2205 padList.push_back(paddingForDim);
2206 }
2207 PadDescriptor padDescriptor(padList);
2208 IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
2209 previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
2210 // Use the padding to calculate the new output tensor shape
2211 TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
2212 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2213 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2214}
2215
surmeh01bceff2f2018-03-29 16:29:27 +01002216ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
2217 const tensorflow::GraphDef& graphDef)
2218{
Jan Eilers8eb25602020-03-09 12:13:48 +00002219 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002220 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002221
telsoa01c577f2c2018-08-31 09:22:23 +01002222    // In TensorFlow, the Concat node carries the axis for concatenation as an extra, constant input.
surmeh01bceff2f2018-03-29 16:29:27 +01002223 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
surmeh01bceff2f2018-03-29 16:29:27 +01002224
surmeh01bceff2f2018-03-29 16:29:27 +01002225 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2226
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002227 // Constant tensor index
2228 unsigned int index = GetConstInputIndex(inputs);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002229 // Get the axis tensor data
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002230 ParsedConstTfOperation<int32_t>* shapeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002231 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002232
surmeh01bceff2f2018-03-29 16:29:27 +01002233 std::vector<int32_t> axisTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002234 shapeNode->GetConstTensor(axisTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002235
telsoa01c577f2c2018-08-31 09:22:23 +01002236    // This concatDim indicates the data format: a concat along dimension 3 targets the channel
    // dimension of NHWC data, while a concat along dimension 1 targets the channel dimension of NCHW data.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002237 const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);
surmeh01bceff2f2018-03-29 16:29:27 +01002238
telsoa01c577f2c2018-08-31 09:22:23 +01002239 // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
Matteo Martincighf9afc792018-12-06 12:03:17 +00002240 if (concatDim == 0 || concatDim == 2)
surmeh01bceff2f2018-03-29 16:29:27 +01002241 {
telsoa01c577f2c2018-08-31 09:22:23 +01002242 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002243 fmt::format("Dimension {} for concatenation is not supported by Armnn. "
2244 "Node {} {}",
2245 concatDim,
2246 nodeDef.name(),
2247 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002248 }
2249
Matthew Jacksondba634f2019-08-15 15:14:18 +01002250 const unsigned int supportedNumDims = 4;
Matteo Martincighf9afc792018-12-06 12:03:17 +00002251 unsigned int numConcatViews = numInputs - 1;
Matthew Jacksondba634f2019-08-15 15:14:18 +01002252 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002253 concatDescriptor.SetConcatAxis(concatDim);
Matthew Jacksondba634f2019-08-15 15:14:18 +01002254 TensorShape mergeDims(supportedNumDims);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002255 unsigned int mergeDim = 0;
2256 for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
surmeh01bceff2f2018-03-29 16:29:27 +01002257 {
telsoa01c577f2c2018-08-31 09:22:23 +01002258 // Need to double check whether it should be
Matteo Martincighf9afc792018-12-06 12:03:17 +00002259 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002260 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2261
Matteo Martincighf9afc792018-12-06 12:03:17 +00002262 // Double check dimensions of the tensors
Matthew Jacksondba634f2019-08-15 15:14:18 +01002263 if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
Matteo Martincighf9afc792018-12-06 12:03:17 +00002264 {
2265 throw armnn::ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002266                fmt::format("The number of dimensions ({}) for input tensors of the "
2267                            "concatenation op should be {} {}",
2268 inputTensorInfo.GetNumDimensions(),
2269 supportedNumDims,
2270 CHECK_LOCATION().AsString()));
Matteo Martincighf9afc792018-12-06 12:03:17 +00002271 }
2272
2273 // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
2274 mergeDims = inputTensorInfo.GetShape();
2275 unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
Matthew Jacksondba634f2019-08-15 15:14:18 +01002276 std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);
Matteo Martincighf9afc792018-12-06 12:03:17 +00002277
2278 // Update the view origin coordinates and the merge dimension value
2279 concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
2280 mergeDim += mergeDims[concatDim];
surmeh01bceff2f2018-03-29 16:29:27 +01002281 }
2282
Matteo Martincighf9afc792018-12-06 12:03:17 +00002283 // Update the output shape
2284 mergeDims[concatDim] = mergeDim;
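    // Illustrative example (shapes assumed): two NHWC inputs of shape [1, 2, 2, 3] and [1, 2, 2, 5]
    // concatenated along concatDim = 3 give mergeDim = 8 and an output shape of [1, 2, 2, 8].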
Jim Flynn906f9462019-05-10 13:55:21 +01002285 armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002286
Matteo Martincighf9afc792018-12-06 12:03:17 +00002287 layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));
surmeh01bceff2f2018-03-29 16:29:27 +01002288
Matteo Martincighf9afc792018-12-06 12:03:17 +00002289 for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
surmeh01bceff2f2018-03-29 16:29:27 +01002290 {
Matteo Martincighf9afc792018-12-06 12:03:17 +00002291 IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2292 inputSlot.Connect(layer->GetInputSlot(viewIndex));
surmeh01bceff2f2018-03-29 16:29:27 +01002293 }
2294
2295 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2296}
2297
2298ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
2299 const tensorflow::GraphDef& graphDef)
2300{
Jan Eilers8eb25602020-03-09 12:13:48 +00002301 IgnoreUnused(graphDef);
telsoa01c577f2c2018-08-31 09:22:23 +01002302 // Note: the Shape layer is handled in a special way, because:
2303    // 1. ARMNN doesn't support the int32 tensors that a Shape node outputs.
2304 // 2. ARMNN works with statically shaped tensors which are known at parse time.
surmeh01bceff2f2018-03-29 16:29:27 +01002305 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
telsoa01c577f2c2018-08-31 09:22:23 +01002306 // tensor which may be used as an input to other ops, most likely a Reshape.
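    // Illustrative example (shape assumed): for an input of shape [1, 224, 224, 3] this produces a
    // constant int32 tensor of shape [4] holding the values { 1, 224, 224, 3 }.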
surmeh01bceff2f2018-03-29 16:29:27 +01002307
2308 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2309 if (tfDataType != tensorflow::DT_INT32)
2310 {
telsoa01c577f2c2018-08-31 09:22:23 +01002311 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002312 fmt::format("Armnn only supports DT_INT32 as out_type. Got {} for Node {} {}",
2313 tensorflow::DataType_Name(tfDataType),
2314 nodeDef.name(),
2315 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002316 }
2317
2318 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2319 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2320 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2321 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2322
2323 std::vector<int32_t> shapeTensorData;
2324 shapeTensorData.reserve(prevLayerDimensions);
2325
2326 for (unsigned int i=0; i<prevLayerDimensions; ++i)
2327 {
2328 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2329 }
2330
2331 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2332
2333 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2334 nodeDef,
2335 &shapeTensorData[0],
2336 shapeTensorInfo);
2337}
2338
2339ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
2340 const tensorflow::GraphDef& graphDef)
2341{
Jan Eilers8eb25602020-03-09 12:13:48 +00002342 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002343 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2344 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2345
2346 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2347 {
telsoa01c577f2c2018-08-31 09:22:23 +01002348 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002349 fmt::format("ArmNN only supports Reshape layers with constant shapes. "
2350 "Input {} Node {} {}",
2351 inputs[1].m_IndexedValue->GetNode().name(),
2352 nodeDef.name(),
2353 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002354 }
2355 ParsedConstTfOperation<int32_t>* shapeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002356 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01002357
2358 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2359 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2360
2361 std::vector<int32_t> shapeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002362 ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002363 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2364
2365 TensorShape targetShape = outputTensorInfo.GetShape();
2366 ReshapeDescriptor reshapeDesc;
2367 reshapeDesc.m_TargetShape = targetShape;
2368
2369 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2370 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2371 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2372
2373 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2374}
2375
2376ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
2377 const tensorflow::GraphDef& graphDef)
2378{
Jan Eilers8eb25602020-03-09 12:13:48 +00002379 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002380 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2381
2382 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2383 {
telsoa01c577f2c2018-08-31 09:22:23 +01002384 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002385 fmt::format("ArmNN only supports ResizeBilinear layers with constant sizes. "
2386 "Input {}. Node {} {}",
2387 inputs[1].m_IndexedValue->GetNode().name(),
2388 nodeDef.name(),
2389 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002390 }
2391 ParsedConstTfOperation<int32_t>* sizeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002392 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01002393
telsoa01c577f2c2018-08-31 09:22:23 +01002394 // Checks the align_corners attribute is not set.
surmeh01bceff2f2018-03-29 16:29:27 +01002395 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
2396 {
telsoa01c577f2c2018-08-31 09:22:23 +01002397 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002398 fmt::format("ArmNN only supports ResizeBilinear layers with align_corners set to false. "
2399 "Node {} {}",
2400 nodeDef.name(),
2401 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002402 }
2403
telsoa01c577f2c2018-08-31 09:22:23 +01002404 // Data for the parsed tensor args (size) must be stored locally.
surmeh01bceff2f2018-03-29 16:29:27 +01002405 std::vector<int32_t> sizeTensorData;
Matteo Martincigh482ca852018-12-12 09:20:55 +00002406 ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01002407
telsoa01c577f2c2018-08-31 09:22:23 +01002408 // The descriptor only has target height and width attributes, which we get from the size tensor.
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002409 ResizeDescriptor desc;
2410 desc.m_Method = armnn::ResizeMethod::Bilinear;
surmeh01bceff2f2018-03-29 16:29:27 +01002411 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002412 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2413 desc.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002414
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01002415 IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());
surmeh01bceff2f2018-03-29 16:29:27 +01002416
2417 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2418 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
telsoa01c577f2c2018-08-31 09:22:23 +01002419    // The input shape is always in NHWC format; take the batch and channel dimensions from it
2420    // and combine them with the target size to make up the ArmNN output shape.
surmeh01bceff2f2018-03-29 16:29:27 +01002421 unsigned int outBatch = inputTensorInfo.GetShape()[0];
2422 unsigned int outChannels = inputTensorInfo.GetShape()[3];
2423 unsigned int outHeight = desc.m_TargetHeight;
2424 unsigned int outWidth = desc.m_TargetWidth;
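    // For example, a { 1, 10, 16, 3 } NHWC input resized to height 20 and width 32 produces a { 1, 20, 32, 3 } output.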
jimfly018a121502018-12-06 16:19:52 +00002425 TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
telsoa01c577f2c2018-08-31 09:22:23 +01002426 // The output DataType is always Float32, regardless of the input DataType.
surmeh01bceff2f2018-03-29 16:29:27 +01002427 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
2428 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2429
jimfly018a121502018-12-06 16:19:52 +00002430 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01002431
2432 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2433}
2434
2435TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2436{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002437 ARMNN_ASSERT(nodeDef.op() == "Squeeze");
surmeh01bceff2f2018-03-29 16:29:27 +01002438 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2439
2440 DataType type;
2441 if (tfDataType == tensorflow::DT_FLOAT)
2442 {
2443 type = DataType::Float32;
2444 }
2445 else if (tfDataType == tensorflow::DT_INT32)
2446 {
2447 type = DataType::Signed32;
2448 }
2449 else
2450 {
telsoa01c577f2c2018-08-31 09:22:23 +01002451 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002452 fmt::format("Unsupported DataType {} for Squeeze operation {} {}",
2453 tensorflow::DataType_Name(tfDataType),
2454 nodeDef.name(),
2455 CHECK_LOCATION().AsString()));
telsoa01c577f2c2018-08-31 09:22:23 +01002456 }
2457
2458
2459 if (inputTensorInfo.GetNumDimensions() > 4)
2460 {
2461 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002462 fmt::format("Unsupported number of dimensions: {} for input shape for Squeeze {} {}",
2463 inputTensorInfo.GetNumDimensions(),
2464 nodeDef.name(),
2465 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002466 }
2467
2468 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
telsoa01c577f2c2018-08-31 09:22:23 +01002469 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2470
surmeh01bceff2f2018-03-29 16:29:27 +01002471 if (squeezeDims.empty())
2472 {
telsoa01c577f2c2018-08-31 09:22:23 +01002473 squeezeDims.assign(dimensionSequence,
2474 dimensionSequence+inputTensorInfo.GetNumDimensions());
surmeh01bceff2f2018-03-29 16:29:27 +01002475 }
2476
2477 std::vector<uint32_t> outputDims;
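    // A dimension is kept if it is not listed in squeeze_dims or if its size is not 1.
    // For example, squeezing { 1, 2, 1, 3 } with empty squeeze_dims gives { 2, 3 },
    // while squeeze_dims = { 0 } gives { 2, 1, 3 }.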
2478 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2479 {
telsoa01c577f2c2018-08-31 09:22:23 +01002480 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2481 auto currentDimension = inputTensorInfo.GetShape()[i];
2482 if (skipSqueeze || currentDimension != 1)
surmeh01bceff2f2018-03-29 16:29:27 +01002483 {
telsoa01c577f2c2018-08-31 09:22:23 +01002484 outputDims.push_back(currentDimension);
surmeh01bceff2f2018-03-29 16:29:27 +01002485 }
2486 }
2487
2488 if (outputDims.size() > 4)
2489 {
telsoa01c577f2c2018-08-31 09:22:23 +01002490 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002491 fmt::format("Unsupported number of dimensions: {} for output shape for Squeeze {} {}",
2492 outputDims.size(),
2493 nodeDef.name(),
2494 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002495 }
2496
telsoa01c577f2c2018-08-31 09:22:23 +01002497 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2498 outputDims.data());
2499
2500 TensorInfo outTensorInfo = inputTensorInfo;
2501 outTensorInfo.SetShape(outShape);
2502 outTensorInfo.SetDataType(type);
surmeh01bceff2f2018-03-29 16:29:27 +01002503
2504 return outTensorInfo;
2505}
2506
2507ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2508{
Jan Eilers8eb25602020-03-09 12:13:48 +00002509 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002510 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2511
2512 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2513 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2514
2515 TensorInfo outputInfo;
2516 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2517
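    // The squeeze is implemented as a Reshape to the output shape computed above.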
2518 ReshapeDescriptor reshapeDesc;
2519 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2520 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2521 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2522 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2523
2524 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2525}
2526
2527ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2528{
Jan Eilers8eb25602020-03-09 12:13:48 +00002529 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002530 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2531
2532 NormalizationDescriptor normalizationDescriptor;
2533 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2534 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2535 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2536 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2537 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2538 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
ruoyan018174f362018-12-04 18:24:08 +00002539 normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
surmeh01bceff2f2018-03-29 16:29:27 +01002540
2541 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2542 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
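    // For example, depth_radius = 2 gives a normalization window size of 5.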
2543
2544 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
surmeh01bceff2f2018-03-29 16:29:27 +01002545 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2546 nodeDef.name().c_str());
ruoyan018174f362018-12-04 18:24:08 +00002547 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2548 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
surmeh01bceff2f2018-03-29 16:29:27 +01002549
2550 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2551}
2552
2553/// A ParsedTfOperation for a MatMul node.
telsoa01c577f2c2018-08-31 09:22:23 +01002554/// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
2555/// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
2556/// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
2557///
surmeh01bceff2f2018-03-29 16:29:27 +01002558class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
2559{
2560public:
2561 ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2562 : DeferredSingleLayerParsedTfOperation(parser, node)
2563 {
2564 }
2565
2566 void CreateLayerDeferred() override
2567 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002568 ARMNN_ASSERT(m_Layer == nullptr);
surmeh01bceff2f2018-03-29 16:29:27 +01002569 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
2570 }
2571};
2572
2573ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2574{
Jan Eilers8eb25602020-03-09 12:13:48 +00002575 IgnoreUnused(graphDef);
Derek Lambertibaa177f2019-12-10 22:00:43 +00002576
telsoa01c577f2c2018-08-31 09:22:23 +01002577 // Defers the creation of the layer (see ParsedMatMulTfOperation).
surmeh01bceff2f2018-03-29 16:29:27 +01002578 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2579}
2580
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002581ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2582{
Jan Eilers8eb25602020-03-09 12:13:48 +00002583 IgnoreUnused(graphDef);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002584 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2585 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2586 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2587
2588 if (inputs.size() != 2)
2589 {
2590 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002591            fmt::format("Mean expects two inputs. Got {} for Node {} {}",
2592 inputs.size(),
2593 nodeDef.name(),
2594 CHECK_LOCATION().AsString()));
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002595 }
2596
2597 bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2598
2599 ParsedConstTfOperation<int32_t>* axisNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002600 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002601
2602 const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2603
2604 ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2605 const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2606
2607 TensorInfo outputTensorInfo;
2608 MeanDescriptor meanDescriptor;
2609 meanDescriptor.m_KeepDims = keepDims;
2610
2611    // Negative axis values are supported, so they need to be converted
2612    // into the corresponding positive ones.
2613 // Duplicate values are also removed.
2614 std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2615 std::set<unsigned int> positiveAxisSet;
2616 int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2617
2618 std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2619 std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2620 [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
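    // For example, with a rank-4 input an axis of -1 maps to 3, while an axis of 2 stays at 2.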
2621
Derek Lambertibaa177f2019-12-10 22:00:43 +00002622 CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
Ferran Balaguer51dd62f2019-01-11 19:29:18 +00002623
2624 if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2625 {
2626 meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2627 }
2628
2629 IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2630 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2631 inputSlot.Connect(layer->GetInputSlot(0));
2632
2633 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2634}
2635
telsoa01c577f2c2018-08-31 09:22:23 +01002636/// A ParsedTfOperation for a Mul node.
2637/// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
2638/// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
2639/// and in these cases armnn doesn't need a separate layer for the Mul.
2640///
2641class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
2642{
2643public:
2644 ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
2645 : DeferredSingleLayerParsedTfOperation(parser, node)
2646 {
2647 }
2648
2649 void CreateLayerDeferred() override
2650 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01002651 ARMNN_ASSERT(m_Layer == nullptr);
telsoa01c577f2c2018-08-31 09:22:23 +01002652 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2653 }
2654};
2655
surmeh01bceff2f2018-03-29 16:29:27 +01002656ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2657{
Jan Eilers8eb25602020-03-09 12:13:48 +00002658 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002659
telsoa01c577f2c2018-08-31 09:22:23 +01002660 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002661}
2662
2663ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2664 const tensorflow::GraphDef& graphDef)
2665{
Jan Eilers8eb25602020-03-09 12:13:48 +00002666 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002667
2668 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2669
Matthew Sloyan589e3e82020-09-11 16:17:48 +01002670 const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
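    // Input binding ids are assigned sequentially, in the order the Placeholder nodes are parsed.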
surmeh01bceff2f2018-03-29 16:29:27 +01002671
2672 auto it = m_InputShapes.find(nodeDef.name());
2673 if (it == m_InputShapes.end())
2674 {
telsoa01c577f2c2018-08-31 09:22:23 +01002675 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002676 fmt::format("Missing input shape for Placeholder '{}' {}",
2677 nodeDef.name(),
2678 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002679 }
2680 TensorInfo tensorInfo(it->second, DataType::Float32);
2681
2682 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2683
2684 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2685
2686 TrackInputBinding(layer, layerId, tensorInfo);
2687
2688 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2689}
2690
saoste01bbd40612018-08-28 15:41:51 +01002691ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2692{
Jan Eilers8eb25602020-03-09 12:13:48 +00002693 IgnoreUnused(graphDef);
saoste01bbd40612018-08-28 15:41:51 +01002694 return AddRealDivLayer(nodeDef);
2695}
2696
surmeh01bceff2f2018-03-29 16:29:27 +01002697ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
2698 const tensorflow::GraphDef& graphDef)
2699{
Jan Eilers8eb25602020-03-09 12:13:48 +00002700 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002701
2702 ActivationDescriptor activationDesc;
2703 activationDesc.m_Function = ActivationFunction::ReLu;
2704 return AddActivationLayer(nodeDef, activationDesc);
2705}
2706
2707ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2708 const tensorflow::GraphDef& graphDef)
2709{
Jan Eilers8eb25602020-03-09 12:13:48 +00002710 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002711
2712 ActivationDescriptor activationDesc;
2713 activationDesc.m_Function = ActivationFunction::BoundedReLu;
2714 activationDesc.m_A = 6.0f;
2715 activationDesc.m_B = 0.0f;
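    // ArmNN's BoundedReLu computes min(A, max(B, x)), so A = 6 and B = 0 implements ReLU6.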
2716
2717 return AddActivationLayer(nodeDef, activationDesc);
2718}
2719
2720ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2721 const tensorflow::GraphDef& graphDef)
2722{
Jan Eilers8eb25602020-03-09 12:13:48 +00002723 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002724
2725 ActivationDescriptor activationDesc;
2726 activationDesc.m_Function = ActivationFunction::Sigmoid;
2727
2728 return AddActivationLayer(nodeDef, activationDesc);
2729}
2730
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002731ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
2732 const tensorflow::GraphDef &graphDef)
2733{
Jan Eilers8eb25602020-03-09 12:13:48 +00002734 IgnoreUnused(graphDef);
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002735
2736 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2737
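    // Rsqrt (1 / sqrt(x)) is mapped onto ArmNN's generic elementwise unary layer.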
josh minor4a3c6102020-01-06 16:40:46 -06002738 ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
2739 IConnectableLayer* const layer = m_Network->AddElementwiseUnaryLayer(descriptor, nodeDef.name().c_str());
Mohamed Nour Abouelseoud7a8892f2019-01-09 14:19:58 +00002740
2741 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2742 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2743 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2744
2745 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2746}
2747
surmeh01bceff2f2018-03-29 16:29:27 +01002748ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2749 const tensorflow::GraphDef& graphDef)
2750{
Jan Eilers8eb25602020-03-09 12:13:48 +00002751 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002752
2753 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2754
2755 SoftmaxDescriptor softmaxDescriptor;
2756 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2757
2758 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2759 prevLayerSlot.Connect(layer->GetInputSlot(0));
2760 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2761
2762 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2763}
2764
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002765ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
2766 const tensorflow::GraphDef& graphDef)
2767{
Jan Eilers8eb25602020-03-09 12:13:48 +00002768 IgnoreUnused(graphDef);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002769
2770 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2771 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2772 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2773
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002774 // Constant tensor index
2775 unsigned int index = GetConstInputIndex(inputs);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002776 // Get the axis tensor data
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002777 ParsedConstTfOperation<int32_t>* shapeNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002778 PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002779
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002780 std::vector<int32_t> axisTensorData;
2781 shapeNode->GetConstTensor(axisTensorData);
2782
2783    // The splitDim indicates the data format: a split dimension of 3 implies NHWC, 1 implies NCHW.
2784 const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
2785
2786    // ArmNN supports splitting along the channel dimension for data formats NHWC and NCHW.
2787 if (splitDim == 0 || splitDim == 2)
2788 {
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002789 throw armnn::ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002790 fmt::format("Dimension {} for split is not supported by Armnn. "
2791 "Node {} {}",
2792 splitDim,
2793 nodeDef.name(),
2794 CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002795 }
2796
Saoirse Stewart315258e2019-02-28 11:32:41 +00002797    // ArmNN only supports splitter outputs of the same shape, so num_split must evenly divide the size of the split dimension.
2798 uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002799
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002800 IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002801 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2802
Matthew Jacksondba634f2019-08-15 15:14:18 +01002803 const unsigned int supportedNumDims = 4;
Saoirse Stewart91c0eff2019-02-27 11:07:57 +00002804 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2805
Matthew Jacksondba634f2019-08-15 15:14:18 +01002806 if (inputDimSize != supportedNumDims)
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002807 {
2808 throw armnn::ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002809 fmt::format("The number of dimensions: {} for input tensors of the "
2810 "split op should be {} {}",
2811 inputTensorInfo.GetNumDimensions(),
2812 supportedNumDims,
2813 CHECK_LOCATION().AsString()));
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002814 }
Sadik Armagan2ad6cb42018-12-27 11:23:44 +00002815
2816 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2817
2818 // Add current input shape to splitterDimSizes
2819 for (unsigned int i = 0; i < inputDimSize; ++i)
2820 {
2821 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2822 }
2823
2824 if (splitterDimSizes[splitDim] % num_split != 0)
2825 {
2826 throw ParseException("Number of splits must evenly divide the dimension");
2827 }
2828 splitterDimSizes[splitDim] /= num_split;
2829
2830 SplitterDescriptor splitDesc(num_split);
2831 for (unsigned int g = 0; g < num_split; ++g)
2832 {
2833 // Set the size of the views.
2834 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2835 {
2836 splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2837 }
2838 splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
2839 }
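    // For example, splitting a { 1, 18, 4, 4 } tensor along dimension 1 with num_split = 3 creates
    // three { 1, 6, 4, 4 } views whose origins along that dimension are 0, 6 and 12.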
2840
2841 IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
2842
2843 inputSlot.Connect(layer->GetInputSlot(0));
2844
2845 TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2846 splitterDimSizes.data());
2847
2848 for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
2849 {
2850 layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
2851 }
2852
2853 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2854}
2855
surmeh01bceff2f2018-03-29 16:29:27 +01002856ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2857 const tensorflow::GraphDef& graphDef)
2858{
Jan Eilers8eb25602020-03-09 12:13:48 +00002859 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002860
2861 ActivationDescriptor activationDesc;
2862 activationDesc.m_Function = ActivationFunction::SoftReLu;
2863
2864 return AddActivationLayer(nodeDef, activationDesc);
2865}
2866
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002867ParsedTfOperationPtr TfParser::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
2868 const tensorflow::GraphDef& graphDef)
2869{
Jan Eilers8eb25602020-03-09 12:13:48 +00002870 IgnoreUnused(graphDef);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002871
2872 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2873 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2874 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2875
2876 ParsedConstTfOperation<int32_t>* beginNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002877 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[1].m_IndexedValue);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002878 std::vector<int32_t> beginTensorData;
2879 beginNode->GetConstTensor(beginTensorData);
2880
2881 ParsedConstTfOperation<int32_t>* endNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002882 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[2].m_IndexedValue);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002883 std::vector<int32_t> endTensorData;
2884 endNode->GetConstTensor(endTensorData);
2885
2886 ParsedConstTfOperation<int32_t>* stridesNode =
Jan Eilersbb446e52020-04-02 13:56:54 +01002887 PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[3].m_IndexedValue);
Georgios Pinitas5e90aab2020-02-14 14:46:51 +00002888 std::vector<int32_t> stridesTensorData;
2889 stridesNode->GetConstTensor(stridesTensorData);
2890
2891 StridedSliceDescriptor desc;
2892 desc.m_Begin = beginTensorData;
2893 desc.m_End = endTensorData;
2894 desc.m_Stride = stridesTensorData;
2895 desc.m_BeginMask = ReadMandatoryNodeInt32Attribute(nodeDef, "begin_mask");
2896 desc.m_EndMask = ReadMandatoryNodeInt32Attribute(nodeDef, "end_mask");
2897 desc.m_EllipsisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "ellipsis_mask");
2898 desc.m_NewAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "new_axis_mask");
2899 desc.m_ShrinkAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "shrink_axis_mask");
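    // Each mask is a bit field indexed by slice dimension, following TensorFlow's strided_slice semantics;
    // for example, a set bit i in begin_mask means the begin value for dimension i is ignored.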
2900 desc.m_DataLayout = armnn::DataLayout::NHWC;
2901 IConnectableLayer* const layer = m_Network->AddStridedSliceLayer(desc, nodeDef.name().c_str());
2902
2903 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2904 TensorInfo inputTensorInfo = prevLayerSlot.GetTensorInfo();
2905
2906 TensorInfo outputTensorInfo;
2907 CalculateStridedSliceOutputTensorInfo(inputTensorInfo, desc, outputTensorInfo);
2908
2909 prevLayerSlot.Connect(layer->GetInputSlot(0));
2910 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2911
2912 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2913}
2914
surmeh01bceff2f2018-03-29 16:29:27 +01002915ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2916{
Jan Eilers8eb25602020-03-09 12:13:48 +00002917 IgnoreUnused(graphDef);
surmeh01bceff2f2018-03-29 16:29:27 +01002918
2919 ActivationDescriptor activationDesc;
2920 activationDesc.m_Function = ActivationFunction::TanH;
2921 activationDesc.m_A = 1.0f;
2922 activationDesc.m_B = 1.0f;
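    // ArmNN's TanH activation computes A * tanh(B * x); A = B = 1 gives the standard tanh.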
2923
2924 return AddActivationLayer(nodeDef, activationDesc);
2925}
2926
2927ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2928 ActivationDescriptor& activationDesc)
2929{
2930 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2931
2932 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2933
2934 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2935 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2936 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2937 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2938}
2939
2940ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
2941 const tensorflow::GraphDef& graphDef)
2942{
2943 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
2944}
2945
2946ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
2947 const tensorflow::GraphDef& graphDef)
2948{
2949 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
2950}
2951
2952ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
2953 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
2954{
Jan Eilers8eb25602020-03-09 12:13:48 +00002955 IgnoreUnused(graphDef);
Derek Lambertibaa177f2019-12-10 22:00:43 +00002956
surmeh01bceff2f2018-03-29 16:29:27 +01002957 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2958 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2959 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2960
2961 if (inputs.size() != 1)
2962 {
telsoa01c577f2c2018-08-31 09:22:23 +01002963 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01002964            fmt::format("2D Pooling expects one input. Got {} for Node {} {}",
2965 inputs.size(),
2966 nodeDef.name(),
2967 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01002968 }
2969
2970 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2971 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2972 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2973 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2974
2975 Pooling2dDescriptor pooling2dDescriptor;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002976 pooling2dDescriptor.m_PoolType = pooltype;
2977 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
surmeh01bceff2f2018-03-29 16:29:27 +01002978 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2979
telsoa01c577f2c2018-08-31 09:22:23 +01002980 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
FrancisMurtaghf005e312018-12-06 15:26:04 +00002981 DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
2982 pooling2dDescriptor.m_DataLayout = dataLayout;
2983 DataLayoutIndexed dataLayoutIndexed(dataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +01002984
FrancisMurtaghf005e312018-12-06 15:26:04 +00002985 pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
2986 pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
2987 pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
2988 pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002989
FrancisMurtaghf005e312018-12-06 15:26:04 +00002990 uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
2991 uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
surmeh01bceff2f2018-03-29 16:29:27 +01002992
2993 bool padding = false;
2994 TensorInfo outputInfo;
FrancisMurtaghf005e312018-12-06 15:26:04 +00002995 unsigned int outputHeight = 0;
2996 unsigned int outputWidth = 0;
telsoa01c577f2c2018-08-31 09:22:23 +01002997
2998 CHECK_PADDING_TYPE(nodeDef, paddingString);
2999
surmeh01bceff2f2018-03-29 16:29:27 +01003000 if (paddingString == "SAME")
3001 {
3002 padding = true;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003003
3004 outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
3005 static_cast<float>(pooling2dDescriptor.m_StrideY)));
3006 outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
3007 static_cast<float>(pooling2dDescriptor.m_StrideX)));
surmeh01bceff2f2018-03-29 16:29:27 +01003008 }
3009 else if (paddingString == "VALID")
3010 {
3011 padding = false;
FrancisMurtaghf005e312018-12-06 15:26:04 +00003012
3013 outputHeight = static_cast<uint32_t>(ceil(
3014 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
3015 static_cast<float>(pooling2dDescriptor.m_StrideY)));
3016 outputWidth = static_cast<uint32_t>(ceil(
3017 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
3018 static_cast<float>(pooling2dDescriptor.m_StrideX)));
3019 }
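    // For example, a 7x7 input pooled with a 2x2 window and stride 2 produces a 4x4 output
    // with SAME padding and a 3x3 output with VALID padding.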
3020
3021 switch (dataLayout)
3022 {
3023 case DataLayout::NHWC:
3024 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3025 outputHeight,
3026 outputWidth,
3027 inputTensorInfo.GetShape()[3] },
3028 DataType::Float32);
3029 break;
3030 case DataLayout::NCHW:
3031 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3032 inputTensorInfo.GetShape()[1],
3033 outputHeight,
3034 outputWidth },
3035 DataType::Float32);
3036 break;
surmeh01bceff2f2018-03-29 16:29:27 +01003037 }
surmeh01bceff2f2018-03-29 16:29:27 +01003038
3039 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
FrancisMurtaghf005e312018-12-06 15:26:04 +00003040 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01003041 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
FrancisMurtaghf005e312018-12-06 15:26:04 +00003042 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
surmeh01bceff2f2018-03-29 16:29:27 +01003043
3044
3045 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
3046 if (layer == nullptr)
3047 {
telsoa01c577f2c2018-08-31 09:22:23 +01003048 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003049 fmt::format("Failed to add pooling2d layer for {} {}",
3050 nodeDef.name(),
3051 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003052 }
3053
3054 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3055
FrancisMurtaghf005e312018-12-06 15:26:04 +00003056 inputSlot.Connect(layer->GetInputSlot(0));
surmeh01bceff2f2018-03-29 16:29:27 +01003057
3058 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3059}
3060
3061ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
3062{
3063 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3064
3065 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3066 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3067
3068 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
3069 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
3070
3071 if (isBiasAdd)
3072 {
3073        // BiasAdd takes the bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
3074        // with the same data in the correct dimensions so it can be broadcast in the addition.
3075 if(input1Info.GetNumDimensions() != 1)
3076 {
telsoa01c577f2c2018-08-31 09:22:23 +01003077 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003078 fmt::format("Unsupported bias for BiasAdd. It should be a 1D vector. "
3079 "Got {} dimensions for input {}. Node {} {}",
3080 input1Info.GetNumDimensions(),
3081 inputs[1].m_IndexedValue->GetNode().name(),
3082 nodeDef.name(),
3083 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003084 }
3085
3086 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
surmeh01bceff2f2018-03-29 16:29:27 +01003087
telsoa01c577f2c2018-08-31 09:22:23 +01003088 CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
saoste01bbd40612018-08-28 15:41:51 +01003089 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01003090 }
3091 else
3092 {
3093 if (input0Info.GetNumDimensions() == 1)
3094 {
3095 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003096 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01003097 }
3098
3099 if (input1Info.GetNumDimensions() == 1)
3100 {
3101 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003102 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
surmeh01bceff2f2018-03-29 16:29:27 +01003103 }
3104 }
3105
3106 IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());
3107
3108 input0Slot->Connect(layer->GetInputSlot(0));
3109 input1Slot->Connect(layer->GetInputSlot(1));
3110
Nattapat Chaimanowongfab64f02019-02-15 16:46:24 +00003111 if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
3112 {
3113 const TensorShape& input0Shape = input0Info.GetShape();
3114 const TensorShape& input1Shape = input1Info.GetShape();
3115
3116 std::vector<unsigned int> outputShape;
3117 outputShape.reserve(input0Shape.GetNumDimensions());
3118 TensorInfo outputInfo(input0Info);
3119
3120 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3121 {
3122 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3123 }
3124
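        // For example, adding shapes { 1, 4, 4, 3 } and { 1, 1, 1, 3 } broadcasts to an output shape of { 1, 4, 4, 3 }.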
3125 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3126
3127 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3128 }
3129 else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
surmeh01bceff2f2018-03-29 16:29:27 +01003130 {
3131 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3132 }
3133 else
3134 {
3135 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3136 }
3137
3138 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3139}
3140
saoste01bbd40612018-08-28 15:41:51 +01003141ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
3142{
3143 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3144
3145 IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
3146 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3147 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3148
3149 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3150 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3151
3152
3153 if (input0NumDims < input1NumDims)
3154 {
3155 const bool isNHWC = true;
3156 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3157 }
3158 if (input1NumDims < input0NumDims)
3159 {
3160 const bool isNHWC = true;
3161 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3162 }
3163
3164 input0Slot->Connect(layer->GetInputSlot(0));
3165 input1Slot->Connect(layer->GetInputSlot(1));
3166
3167 if (input0NumDims < input1NumDims)
3168 {
3169 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3170 }
3171 else
3172 {
3173 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3174
3175 }
3176 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3177}
3178
Sadik Armagan975c09a2018-12-04 10:02:08 +00003179ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
3180{
3181 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3182
3183 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3184 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3185
3186 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3187 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3188
3189 if (input0NumDims < input1NumDims)
3190 {
3191 const bool isNHWC = true;
3192 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3193 }
3194 if (input1NumDims < input0NumDims)
3195 {
3196 const bool isNHWC = true;
3197 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3198 }
3199
3200 IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
3201
3202 input0Slot->Connect(layer->GetInputSlot(0));
3203 input1Slot->Connect(layer->GetInputSlot(1));
3204
3205 TensorInfo outputInfo = input0Slot->GetTensorInfo();
3206 std::vector<unsigned int> outputShape;
3207
3208 const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
3209 const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
3210
3211 for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3212 {
3213 outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3214 }
3215
3216 outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3217 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3218
3219 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3220}
3221
telsoa01c577f2c2018-08-31 09:22:23 +01003222IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
3223{
3224 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3225
3226 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
3227 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3228 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3229
3230 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3231 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3232
3233 if (input0NumDims < input1NumDims)
3234 {
3235 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003236 input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003237 }
3238 if (input1NumDims < input0NumDims)
3239 {
3240 const bool isNHWC = true;
saoste01bbd40612018-08-28 15:41:51 +01003241 input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
telsoa01c577f2c2018-08-31 09:22:23 +01003242 }
3243
3244 input0Slot->Connect(layer->GetInputSlot(0));
3245 input1Slot->Connect(layer->GetInputSlot(1));
3246
3247 if (input0NumDims < input1NumDims)
3248 {
3249 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3250 }
3251 else
3252 {
3253 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3254 }
3255 return layer;
3256}
3257
surmeh01bceff2f2018-03-29 16:29:27 +01003258IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
3259 const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
3260{
telsoa01c577f2c2018-08-31 09:22:23 +01003261 // Finds bias const (if applicable).
surmeh01bceff2f2018-03-29 16:29:27 +01003262 ParsedConstTfOperation<float>* biasNode = nullptr;
3263 if (addNodeDef != nullptr)
3264 {
3265 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
telsoa01c577f2c2018-08-31 09:22:23 +01003266 // Finds our inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01003267 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
3268 {
Jan Eilersbb446e52020-04-02 13:56:54 +01003269 biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01003270 }
3271 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
3272 {
Jan Eilersbb446e52020-04-02 13:56:54 +01003273 biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01003274 }
3275 else
3276 {
telsoa01c577f2c2018-08-31 09:22:23 +01003277 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003278 fmt::format("ArmNN only supports fully connected layers with constant bias. "
3279 "Inputs {} and {}. AddNode {}. MatMulNode {} {}",
3280 addInputs[0].m_IndexedValue->GetNode().name(),
3281 addInputs[1].m_IndexedValue->GetNode().name(),
3282 addNodeDef->name(),
3283 matMulNodeDef.name(),
3284 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003285 }
3286 }
3287
telsoa01c577f2c2018-08-31 09:22:23 +01003288 // Finds matmul inputs.
surmeh01bceff2f2018-03-29 16:29:27 +01003289 ParsedConstTfOperation<float>* weightNode = nullptr;
3290 ParsedTfOperation* inputNode = nullptr;
3291 unsigned int inputIdx = 0;
3292 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
3293 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
3294 {
Jan Eilersbb446e52020-04-02 13:56:54 +01003295 weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01003296 inputNode = mulInputs[1].m_IndexedValue;
3297 inputIdx = mulInputs[1].m_Index;
3298 }
3299 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
3300 {
Jan Eilersbb446e52020-04-02 13:56:54 +01003301 weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
surmeh01bceff2f2018-03-29 16:29:27 +01003302 inputNode = mulInputs[0].m_IndexedValue;
3303 inputIdx = mulInputs[0].m_Index;
3304 }
3305 else
3306 {
telsoa01c577f2c2018-08-31 09:22:23 +01003307 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003308 fmt::format("ArmNN only supports fully connected layers with constant weights. "
3309 "Inputs {} and {}. MatMulNode {} {}",
3310 mulInputs[0].m_IndexedValue->GetNode().name(),
3311 mulInputs[1].m_IndexedValue->GetNode().name(),
3312 matMulNodeDef.name(),
3313 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003314 }
3315
3316 std::vector<float> weightTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01003317 // Handles weight.
Matteo Martincigh482ca852018-12-12 09:20:55 +00003318 ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01003319
3320 FullyConnectedDescriptor desc;
3321 desc.m_BiasEnabled = addNodeDef != nullptr;
3322
3323 IConnectableLayer* layer = nullptr;
Matteo Martincighfc598e12019-05-14 10:36:13 +01003324 Optional<ConstTensor> optionalBiases;
3325 std::vector<float> biasTensorData;
telsoa01c577f2c2018-08-31 09:22:23 +01003326 // Makes the layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003327 if (addNodeDef != nullptr)
3328 {
Matteo Martincigh482ca852018-12-12 09:20:55 +00003329 ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
surmeh01bceff2f2018-03-29 16:29:27 +01003330
3331 if (weights.GetShape()[1] != biases.GetShape()[0])
3332 {
telsoa01c577f2c2018-08-31 09:22:23 +01003333 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003334 fmt::format("Shape of matmul weights and bias do not match. "
3335 "AddNode {}. MatMulNode {} {}",
3336 addNodeDef->name(),
3337 matMulNodeDef.name(),
3338 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003339 }
3340
Matteo Martincighfc598e12019-05-14 10:36:13 +01003341 optionalBiases = Optional<ConstTensor>(biases);
surmeh01bceff2f2018-03-29 16:29:27 +01003342 }
Matteo Martincighfc598e12019-05-14 10:36:13 +01003343 layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);
surmeh01bceff2f2018-03-29 16:29:27 +01003344
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01003345 ARMNN_ASSERT(layer != nullptr);
surmeh01bceff2f2018-03-29 16:29:27 +01003346
3347 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3348 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3349
telsoa01c577f2c2018-08-31 09:22:23 +01003350 // Handles output.
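    // The MatMul weights are laid out as [inputSize, outputSize], so the second weight dimension gives the output size.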
surmeh01bceff2f2018-03-29 16:29:27 +01003351 TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
3352 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3353 return layer;
3354}
3355
3356void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3357{
telsoa01c577f2c2018-08-31 09:22:23 +01003358 // Gets the type of the node (assume float).
surmeh01bceff2f2018-03-29 16:29:27 +01003359 tensorflow::DataType type = tensorflow::DT_FLOAT;
3360 if (nodeDef.attr().count("T") != 0)
3361 {
3362 auto attr = nodeDef.attr().at("T");
3363 type = attr.type();
3364 }
3365 else if (nodeDef.attr().count("dtype") != 0)
3366 {
3367 auto attr = nodeDef.attr().at("dtype");
3368 type = attr.type();
3369 }
3370
Ferran Balaguerc602f292019-02-08 17:09:55 +00003371 if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
surmeh01bceff2f2018-03-29 16:29:27 +01003372 {
telsoa01c577f2c2018-08-31 09:22:23 +01003373 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003374 fmt::format("Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
3375 "Got {} for Node {} {}",
3376 tensorflow::DataType_Name(type),
3377 nodeDef.name(),
3378 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003379 }
3380
3381 const std::string& operation = nodeDef.op();
narpra016f37f832018-12-21 18:30:00 +00003382 auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3383 if (itControlInput != m_ControlInputs.end())
3384 {
3385        // We currently allow control inputs from the TensorFlow graph but ignore them in the ArmNN graph.
3386 return;
3387 }
surmeh01bceff2f2018-03-29 16:29:27 +01003388 auto it = ms_OperationNameToParsingFunctions.find(operation);
3389 if (it != ms_OperationNameToParsingFunctions.end())
3390 {
3391 auto func = it->second;
3392 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3393 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3394
telsoa01c577f2c2018-08-31 09:22:23 +01003395 // Stores the parsed operation so that dependent layers can connect to it.
surmeh01bceff2f2018-03-29 16:29:27 +01003396 auto it = m_ParsedTfOperations.find(nodeDef.name());
3397 if (it != m_ParsedTfOperations.end())
3398 {
James Ward58dec6b2020-09-11 17:32:44 +01003399 throw ParseException(fmt::format("Name {} used by more than one node", nodeDef.name()));
surmeh01bceff2f2018-03-29 16:29:27 +01003400 }
3401 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3402
telsoa01c577f2c2018-08-31 09:22:23 +01003403 // If this node was requested as an output from the network, then adds an ArmNN output layer.
surmeh01bceff2f2018-03-29 16:29:27 +01003404 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3405 m_RequestedOutputs.end())
3406 {
3407 auto outId = ParseOutputId(nodeDef.name());
Matthew Sloyan589e3e82020-09-11 16:17:48 +01003408 const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
surmeh01bceff2f2018-03-29 16:29:27 +01003409 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3410
3411 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3412
3413 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3414
3415 prevSlot.Connect(outputLayer->GetInputSlot(0));
3416
3417 TrackOutputBinding(outputLayer, layerId, tensorInfo);
3418 }
3419 }
3420 else
3421 {
telsoa01c577f2c2018-08-31 09:22:23 +01003422 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003423 fmt::format("Unsupported operation {} in tensorflow::GraphDef {}",
3424 operation,
3425 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003426 }
3427}
3428
3429void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
3430{
telsoa01c577f2c2018-08-31 09:22:23 +01003431 // Adds all nodes to our map.
surmeh01bceff2f2018-03-29 16:29:27 +01003432 m_NodesByName.clear();
3433 m_NetworkInputsBindingInfo.clear();
3434 m_NetworkOutputsBindingInfo.clear();
3435
3436 for (int i = 0; i < graphDef.node_size(); ++i)
3437 {
3438 const tensorflow::NodeDef& node = graphDef.node(i);
3439 m_NodesByName[node.name()] = &node;
3440 }
3441
Francis Murtaghbb190a62019-04-04 11:16:29 +01003442 // Checks that the input nodes the user has requested exist.
3443 for (const auto& pair : m_InputShapes)
3444 {
3445 const std::string& requestedInputName = pair.first;
3446 auto nodeIt = m_NodesByName.find(requestedInputName);
3447 if (nodeIt == m_NodesByName.end())
3448 {
3449 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003450 fmt::format("Couldn't find requested input node '{}' in graph {}",
3451 requestedInputName,
3452 CHECK_LOCATION().AsString()));
Francis Murtaghbb190a62019-04-04 11:16:29 +01003453 }
3454 }
3455
telsoa01c577f2c2018-08-31 09:22:23 +01003456 // Finds the output nodes the user requested.
surmeh01bceff2f2018-03-29 16:29:27 +01003457 std::vector<const tensorflow::NodeDef*> targetNodes;
3458 for (const std::string& requestedOutputName : m_RequestedOutputs)
3459 {
3460 auto nodeIt = m_NodesByName.find(requestedOutputName);
3461 if (nodeIt == m_NodesByName.end())
3462 {
telsoa01c577f2c2018-08-31 09:22:23 +01003463 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003464 fmt::format("Couldn't find requested output node '{}' in graph {}",
3465 requestedOutputName,
3466 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003467 }
3468 targetNodes.push_back(nodeIt->second);
3469 }
3470
telsoa01c577f2c2018-08-31 09:22:23 +01003471 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003472 std::vector<const tensorflow::NodeDef*> sortedNodes;
3473 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3474 targetNodes,
3475 [this](const tensorflow::NodeDef* node)
3476 {
3477 auto outputs = GetTfInputNodes(*node);
3478 std::vector<const tensorflow::NodeDef*> nodesOnly;
3479 for (const auto & o : outputs) {
3480 nodesOnly.push_back(o.m_IndexedValue);
3481 }
3482 return nodesOnly;
3483 },
3484 sortedNodes))
3485 {
telsoa01c577f2c2018-08-31 09:22:23 +01003486 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003487 fmt::format("Cycle detected in graph {}",
3488 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003489 }
3490
telsoa01c577f2c2018-08-31 09:22:23 +01003491 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
surmeh01bceff2f2018-03-29 16:29:27 +01003492 for (const auto& it : sortedNodes)
3493 {
3494 const tensorflow::NodeDef& currentNode = *it;
3495 LoadNodeDef(currentNode, graphDef);
3496 }
3497}
3498
3499INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
3500 const std::map<std::string, TensorShape>& inputShapes,
3501 const std::vector<std::string>& requestedOutputs)
3502{
3503 FILE* fd = fopen(graphFile, "r");
3504
3505 if (fd == nullptr)
3506 {
telsoa01c577f2c2018-08-31 09:22:23 +01003507 throw FileNotFoundException(
James Ward58dec6b2020-09-11 17:32:44 +01003508 fmt::format("Graph file {} failed to open {}",
3509 graphFile,
3510 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003511 }
3512
telsoa01c577f2c2018-08-31 09:22:23 +01003513 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003514 tensorflow::GraphDef graphDef;
3515 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3516 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3517 delete input;
3518 fclose(fd);
3519
3520 if (!success)
3521 {
telsoa01c577f2c2018-08-31 09:22:23 +01003522 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003523 fmt::format("Failed to parse graph file {}",
3524 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003525 }
3526
3527 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3528}
3529
3530INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
3531 const std::map<std::string, TensorShape>& inputShapes,
3532 const std::vector<std::string>& requestedOutputs)
3533{
telsoa01c577f2c2018-08-31 09:22:23 +01003534 // Parses the string into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003535 tensorflow::GraphDef graphDef;
3536 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3537
3538 if (!success)
3539 {
telsoa01c577f2c2018-08-31 09:22:23 +01003540 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003541            fmt::format("Failed to parse graph string {}",
3542 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003543 }
3544
3545 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3546}
3547
3548INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
3549 const std::map<std::string, TensorShape>& inputShapes,
3550 const std::vector<std::string>& requestedOutputs)
3551{
3552 FILE* fd = fopen(graphFile, "rb");
3553
3554 if (fd == nullptr)
3555 {
telsoa01c577f2c2018-08-31 09:22:23 +01003556 throw FileNotFoundException(
James Ward58dec6b2020-09-11 17:32:44 +01003557 fmt::format("Graph file {} failed to open {}",
3558 graphFile,
3559 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003560 }
3561
telsoa01c577f2c2018-08-31 09:22:23 +01003562 // Parses the file into a message.
surmeh01bceff2f2018-03-29 16:29:27 +01003563 tensorflow::GraphDef graphDef;
3564
3565 google::protobuf::io::FileInputStream inStream(fileno(fd));
3566 google::protobuf::io::CodedInputStream codedStream(&inStream);
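    // Raise protobuf's default total-bytes limit so that large frozen graphs can be parsed.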
Nikhil Raje5181532020-10-09 14:52:25 +01003567 codedStream.SetTotalBytesLimit(INT_MAX);
surmeh01bceff2f2018-03-29 16:29:27 +01003568 bool success = graphDef.ParseFromCodedStream(&codedStream);
3569 fclose(fd);
3570
3571 if (!success)
3572 {
telsoa01c577f2c2018-08-31 09:22:23 +01003573 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003574 fmt::format("Failed to parse protobuf file {} {}",
3575 graphFile,
3576 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003577 }
3578
3579 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3580}
3581
3582INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
3583 const std::map<std::string, TensorShape>& inputShapes,
3584 const std::vector<std::string>& requestedOutputs)
3585{
3586 m_Network = INetwork::Create();
3587
3588 m_InputShapes = inputShapes;
3589 if (requestedOutputs.size() == 0)
3590 {
telsoa01c577f2c2018-08-31 09:22:23 +01003591 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003592 fmt::format("requestedOutputs must have at least one entry {}",
3593 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003594 }
3595 m_RequestedOutputs = requestedOutputs;
3596
3597 try
3598 {
3599 LoadGraphDef(graphDef);
3600 }
3601 catch (const ParseException& e)
3602 {
3603 Cleanup();
3604 throw e;
3605 }
3606
3607 Cleanup();
3608
3609 return std::move(m_Network);
3610}
3611
3612void TfParser::Cleanup()
3613{
telsoa01c577f2c2018-08-31 09:22:23 +01003614 // Cleanup, in case we reuse this parser.
surmeh01bceff2f2018-03-29 16:29:27 +01003615 m_InputShapes.clear();
3616 m_RequestedOutputs.clear();
3617 m_NodesByName.clear();
3618 m_ParsedTfOperations.clear();
3619}
3620
3621BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
3622{
3623 return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
3624}
3625
3626BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
3627{
3628 return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
3629}
3630
3631std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
3632 const char* bindingPointDesc,
3633 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3634{
3635 auto it = nameToBindingInfo.find(layerName);
3636 if (it == nameToBindingInfo.end())
3637 {
telsoa01c577f2c2018-08-31 09:22:23 +01003638 throw InvalidArgumentException(
James Ward58dec6b2020-09-11 17:32:44 +01003639 fmt::format("Unknown {} '{}' {}",
3640 bindingPointDesc,
3641 layerName,
3642 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003643 }
3644 return it->second;
3645}
3646
3647void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3648{
3649 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
3650}
3651
3652void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
3653{
3654 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
3655}
3656
3657void TfParser::TrackBindingPoint(IConnectableLayer* layer,
3658 LayerBindingId id,
3659 const TensorInfo& tensorInfo,
3660 const char* bindingPointDesc,
3661 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3662{
3663 const std::string layerName = layer->GetName();
3664 auto it = nameToBindingInfo.find(layerName);
3665 if (it == nameToBindingInfo.end())
3666 {
3667 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3668 }
3669 else
3670 {
telsoa01c577f2c2018-08-31 09:22:23 +01003671 throw ParseException(
James Ward58dec6b2020-09-11 17:32:44 +01003672 fmt::format("Id {} used by more than one {} layer {}",
3673 id,
3674 bindingPointDesc,
3675 CHECK_LOCATION().AsString()));
surmeh01bceff2f2018-03-29 16:29:27 +01003676 }
3677}
3678
3679} // namespace armnnTfParser